"""
2
Scrapy Shell
3

4
See documentation in docs/topics/shell.rst
5
"""
6 7
from threading import Thread
7

8 7
from scrapy.commands import ScrapyCommand
9 7
from scrapy.http import Request
10 7
from scrapy.shell import Shell
11 7
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
12 7
from scrapy.utils.url import guess_scheme
13

14

15 7
class Command(ScrapyCommand):

    requires_project = False
    # These defaults are merged into the project settings at command priority.
    default_settings = {
        # Keep the engine alive when the spider goes idle, so the interactive
        # session can keep issuing fetches.
        'KEEP_ALIVE': True,
        # Disable the periodic LogStats output, which would clutter the console.
        'LOGSTATS_INTERVAL': 0,
        # A no-op dupe filter, so the same URL can be fetched more than once.
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
    }

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return ("Interactive console for scraping the given url or file. "
                "Use ./file.html syntax or full path for local file.")

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-c", dest="code",
                          help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
                          help="use this spider")
        parser.add_option("--no-redirect", dest="no_redirect", action="store_true", default=False,
                          help="do not handle HTTP 3xx status codes and print response as-is")

    def update_vars(self, vars):
        """You can use this function to update the Scrapy objects that will be
        available in the shell.
        """
        pass
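
    # A minimal sketch of how a subclass could override update_vars() to expose
    # extra objects in the shell namespace; the subclass name and the variable
    # added below are hypothetical examples, not part of Scrapy:
    #
    #     class MyShellCommand(Command):
    #         def update_vars(self, vars):
    #             vars['bot_name'] = self.settings.get('BOT_NAME')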

    def run(self, args, opts):
        url = args[0] if args else None
        if url:
            # The first argument may be a local file; guess_scheme() adds a
            # file:// scheme for path-like input and http:// otherwise.
            url = guess_scheme(url)

        spider_loader = self.crawler_process.spider_loader

        spidercls = DefaultSpider
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        elif url:
            spidercls = spidercls_for_request(spider_loader, Request(url),
                                              spidercls, log_multiple=True)

        # The crawler is created this way since the Shell manually handles the
        # crawling engine, so the setup done in the crawl() method won't work.
        crawler = self.crawler_process._create_crawler(spidercls)
        # The Shell class needs a persistent engine in the crawler.
        crawler.engine = crawler._create_engine()
        crawler.engine.start()

        self._start_crawler_thread()

        shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        shell.start(url=url, redirect=not opts.no_redirect)

    def _start_crawler_thread(self):
        # crawler_process.start() runs the Twisted reactor and blocks, so it is
        # started in a background thread, leaving the main thread free for the
        # interactive console. stop_after_crawl=False keeps the reactor running
        # between fetches; the daemon flag lets the process exit with the shell.
        t = Thread(target=self.crawler_process.start,
                   kwargs={'stop_after_crawl': False})
        t.daemon = True
        t.start()
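

# A few illustrative invocations of this command; the URL and local file path
# below are placeholders, not values assumed by the code:
#
#     scrapy shell http://example.com
#     scrapy shell http://example.com -c 'response.status'
#     scrapy shell ./page.html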
