desc (stringlengths 3–26.7k) · decl (stringlengths 11–7.89k) · bodies (stringlengths 8–553k)
'>>> MediaPipeline()._key_for_pipe("IMAGES") \'IMAGES\' >>> class MyPipe(MediaPipeline): ... pass >>> MyPipe()._key_for_pipe("IMAGES", base_class_name="MediaPipeline") \'MYPIPE_IMAGES\''
def _key_for_pipe(self, key, base_class_name=None, settings=None):
    class_name = self.__class__.__name__
    formatted_key = '{}_{}'.format(class_name.upper(), key)
    if (class_name == base_class_name or
            not base_class_name or
            (settings and not settings.get(formatted_key))):
        return key
    return formatted_key
'Check request before starting download'
def media_to_download(self, request, info):
pass
'Returns the media requests to download'
def get_media_requests(self, item, info):
pass
'Handler for successful downloads'
def media_downloaded(self, response, request, info):
return response
'Handler for failed downloads'
def media_failed(self, failure, request, info):
return failure
'Called per item when all media requests have been processed'
def item_completed(self, results, item, info):
    if self.LOG_FAILED_RESULTS:
        for ok, value in results:
            if not ok:
                logger.error(
                    '%(class)s found errors processing %(item)s',
                    {'class': self.__class__.__name__, 'item': item},
                    exc_info=failure_to_exc_info(value),
                    extra={'spider': info.spider},
                )
    return item
'Run a crawler with the provided arguments. It will call the given Crawler\'s :meth:`~Crawler.crawl` method, while keeping track of it so it can be stopped later. If `crawler_or_spidercls` isn\'t a :class:`~scrapy.crawler.Crawler` instance, this method will try to create one using this parameter as the spider class given to it. Returns a deferred that is fired when the crawling is finished. :param crawler_or_spidercls: already created crawler, or a spider class or spider\'s name inside the project to create it :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance, :class:`~scrapy.spiders.Spider` subclass or string :param list args: arguments to initialize the spider :param dict kwargs: keyword arguments to initialize the spider'
def crawl(self, crawler_or_spidercls, *args, **kwargs):
    crawler = self.create_crawler(crawler_or_spidercls)
    return self._crawl(crawler, *args, **kwargs)
'Return a :class:`~scrapy.crawler.Crawler` object. * If `crawler_or_spidercls` is a Crawler, it is returned as-is. * If `crawler_or_spidercls` is a Spider subclass, a new Crawler is constructed for it. * If `crawler_or_spidercls` is a string, this function finds a spider with this name in a Scrapy project (using spider loader), then creates a Crawler instance for it.'
def create_crawler(self, crawler_or_spidercls):
    if isinstance(crawler_or_spidercls, Crawler):
        return crawler_or_spidercls
    return self._create_crawler(crawler_or_spidercls)
'Simultaneously stops all the crawling jobs taking place. Returns a deferred that is fired when they have all ended.'
def stop(self):
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
'join() Returns a deferred that is fired when all managed :attr:`crawlers` have completed their executions.'
@defer.inlineCallbacks
def join(self):
    while self._active:
        yield defer.DeferredList(self._active)
'This method starts a Twisted `reactor`_, adjusts its pool size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`. If `stop_after_crawl` is True, the reactor will be stopped after all crawlers have finished, using :meth:`join`. :param boolean stop_after_crawl: whether to stop the reactor once all crawlers have finished'
def start(self, stop_after_crawl=True):
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(self._stop_reactor)
    reactor.installResolver(self._get_dns_resolver())
    tp = reactor.getThreadPool()
    tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    reactor.run(installSignalHandlers=False)
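A minimal usage sketch of the crawl()/start() API documented above. The spider class, its name and the demo URL are illustrative, not part of the source:

from scrapy import Spider
from scrapy.crawler import CrawlerProcess

class QuotesSpider(Spider):          # hypothetical spider, for illustration only
    name = 'quotes'
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        for text in response.css('span.text::text').extract():
            yield {'text': text}

process = CrawlerProcess(settings={'LOG_LEVEL': 'INFO'})
process.crawl(QuotesSpider)   # returns a deferred; the crawl is tracked by the process
process.start()               # runs the reactor and stops it when the crawl finishes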
'Return the most appropriate Response class for the given mimetype'
def from_mimetype(self, mimetype):
    if mimetype is None:
        return Response
    elif mimetype in self.classes:
        return self.classes[mimetype]
    else:
        basetype = '%s/*' % mimetype.split('/')[0]
        return self.classes.get(basetype, Response)
'Return the most appropriate Response class from an HTTP Content-Type header'
def from_content_type(self, content_type, content_encoding=None):
    if content_encoding:
        return Response
    mimetype = to_native_str(content_type).split(';')[0].strip().lower()
    return self.from_mimetype(mimetype)
'Return the most appropriate Response class by looking at the HTTP headers'
def from_headers(self, headers):
    cls = Response
    if 'Content-Type' in headers:
        cls = self.from_content_type(
            content_type=headers['Content-type'],
            content_encoding=headers.get('Content-Encoding'),
        )
    if cls is Response and 'Content-Disposition' in headers:
        cls = self.from_content_disposition(headers['Content-Disposition'])
    return cls
'Return the most appropriate Response class from a file name'
def from_filename(self, filename):
    mimetype, encoding = self.mimetypes.guess_type(filename)
    if mimetype and not encoding:
        return self.from_mimetype(mimetype)
    else:
        return Response
'Try to guess the appropriate response based on the body content. This method is a bit magic and could be improved in the future, but it\'s not meant to be used except for special cases where response types cannot be guessed using more straightforward methods.'
def from_body(self, body):
    chunk = body[:5000]
    chunk = to_bytes(chunk)
    if not binary_is_text(chunk):
        return self.from_mimetype('application/octet-stream')
    elif b'<html>' in chunk.lower():
        return self.from_mimetype('text/html')
    elif b'<?xml' in chunk.lower():
        return self.from_mimetype('text/xml')
    else:
        return self.from_mimetype('text')
'Guess the most appropriate Response class based on the given arguments.'
def from_args(self, headers=None, url=None, filename=None, body=None):
    cls = Response
    if headers is not None:
        cls = self.from_headers(headers)
    if cls is Response and url is not None:
        cls = self.from_filename(url)
    if cls is Response and filename is not None:
        cls = self.from_filename(filename)
    if cls is Response and body is not None:
        cls = self.from_body(body)
    return cls
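A short sketch of how these lookups typically resolve, using the module-level responsetypes instance from scrapy.responsetypes; the expected classes in the comments are the usual mappings, not guaranteed output:

from scrapy.responsetypes import responsetypes

responsetypes.from_mimetype('text/html')                                # HtmlResponse
responsetypes.from_content_type('text/xml; charset=utf-8')              # XmlResponse
responsetypes.from_content_type('text/html', content_encoding='gzip')   # Response (body still encoded)
responsetypes.from_body(b'<?xml version="1.0"?><feed/>')                # XmlResponse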
'Stop the request from returning objects and record any errors'
def _clean_req(self, request, method, results):
    cb = request.callback

    @wraps(cb)
    def cb_wrapper(response):
        try:
            output = cb(response)
            output = list(iterate_spider_output(output))
        except:
            case = _create_testcase(method, 'callback')
            results.addError(case, sys.exc_info())

    def eb_wrapper(failure):
        case = _create_testcase(method, 'errback')
        exc_info = (failure.value, failure.type, failure.getTracebackObject())
        results.addError(case, exc_info)

    request.callback = cb_wrapper
    request.errback = eb_wrapper
'Return the Spider class for the given spider name. If the spider name is not found, raise a KeyError.'
def load(self, spider_name):
    try:
        return self._spiders[spider_name]
    except KeyError:
        raise KeyError('Spider not found: {}'.format(spider_name))
'Return the list of spider names that can handle the given request.'
def find_by_request(self, request):
return [name for (name, cls) in self._spiders.items() if cls.handles_request(request)]
'Return a list with the names of all spiders available in the project.'
def list(self):
return list(self._spiders.keys())
'Sets value if priority is higher than or equal to the current priority.'
def set(self, value, priority):
    if priority >= self.priority:
        if isinstance(self.value, BaseSettings):
            value = BaseSettings(value, priority=priority)
        self.value = value
        self.priority = priority
'Get a setting value without affecting its original type. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def get(self, name, default=None):
return (self[name] if (self[name] is not None) else default)
'Get a setting value as a boolean. ``1``, ``\'1\'``, ``True`` and ``\'True\'`` return ``True``, while ``0``, ``\'0\'``, ``False``, ``\'False\'`` and ``None`` return ``False``. For example, settings populated through environment variables set to ``\'0\'`` will return ``False`` when using this method. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def getbool(self, name, default=False):
    got = self.get(name, default)
    try:
        return bool(int(got))
    except ValueError:
        if got in ('True', 'true'):
            return True
        if got in ('False', 'false'):
            return False
        raise ValueError("Supported values for boolean settings "
                         "are 0/1, True/False, '0'/'1', "
                         "'True'/'False' and 'true'/'false'")
'Get a setting value as an int. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def getint(self, name, default=0):
return int(self.get(name, default))
'Get a setting value as a float. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def getfloat(self, name, default=0.0):
return float(self.get(name, default))
'Get a setting value as a list. If the setting\'s original type is a list, a copy of it will be returned. If it\'s a string it will be split by ",". For example, settings populated through environment variables set to ``\'one,two\'`` will return a list [\'one\', \'two\'] when using this method. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def getlist(self, name, default=None):
    value = self.get(name, default or [])
    if isinstance(value, six.string_types):
        value = value.split(',')
    return list(value)
'Get a setting value as a dictionary. If the setting\'s original type is a dictionary, a copy of it will be returned. If it is a string it will be evaluated as a JSON dictionary. If it is a :class:`~scrapy.settings.BaseSettings` instance itself, it will be converted to a dictionary, containing all its current settings values as they would be returned by :meth:`~scrapy.settings.BaseSettings.get`, and losing all information about priority and mutability. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any'
def getdict(self, name, default=None):
    value = self.get(name, default or {})
    if isinstance(value, six.string_types):
        value = json.loads(value)
    return dict(value)
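A brief sketch of the typed getters above (getbool, getlist, getdict), fed string values as they would arrive from environment variables; the setting names are hypothetical:

from scrapy.settings import Settings

settings = Settings()
settings.set('FEATURE_ENABLED', '0')
settings.getbool('FEATURE_ENABLED')         # False
settings.set('ALLOWED_HOSTS', 'one.example,two.example')
settings.getlist('ALLOWED_HOSTS')           # ['one.example', 'two.example']
settings.set('EXTRA_OPTIONS', '{"retries": 2}')
settings.getdict('EXTRA_OPTIONS')           # {'retries': 2}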
'Get a composition of a dictionary-like setting and its `_BASE` counterpart. :param name: name of the dictionary-like setting :type name: string'
def getwithbase(self, name):
    compbs = BaseSettings()
    compbs.update(self[name + '_BASE'])
    compbs.update(self[name])
    return compbs
'Return the current numerical priority value of a setting, or ``None`` if the given ``name`` does not exist. :param name: the setting name :type name: string'
def getpriority(self, name):
    if name not in self:
        return None
    return self.attributes[name].priority
'Return the numerical value of the highest priority present throughout all settings, or the numerical value for ``default`` from :attr:`~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings stored.'
def maxpriority(self):
    if len(self) > 0:
        return max(self.getpriority(name) for name in self)
    else:
        return get_settings_priority('default')
'Store a key/value attribute with a given priority. Settings should be populated *before* configuring the Crawler object (through the :meth:`~scrapy.crawler.Crawler.configure` method), otherwise they won\'t have any effect. :param name: the setting name :type name: string :param value: the value to associate with the setting :type value: any :param priority: the priority of the setting. Should be a key of :attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer :type priority: string or int'
def set(self, name, value, priority='project'):
    self._assert_mutability()
    priority = get_settings_priority(priority)
    if name not in self:
        if isinstance(value, SettingsAttribute):
            self.attributes[name] = value
        else:
            self.attributes[name] = SettingsAttribute(value, priority)
    else:
        self.attributes[name].set(value, priority)
'Store settings from a module with a given priority. This is a helper function that calls :meth:`~scrapy.settings.BaseSettings.set` for every globally declared uppercase variable of ``module`` with the provided ``priority``. :param module: the module or the path of the module :type module: module object or string :param priority: the priority of the settings. Should be a key of :attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer :type priority: string or int'
def setmodule(self, module, priority='project'):
    self._assert_mutability()
    if isinstance(module, six.string_types):
        module = import_module(module)
    for key in dir(module):
        if key.isupper():
            self.set(key, getattr(module, key), priority)
'Store key/value pairs with a given priority. This is a helper function that calls :meth:`~scrapy.settings.BaseSettings.set` for every item of ``values`` with the provided ``priority``. If ``values`` is a string, it is assumed to be JSON-encoded and parsed into a dict with ``json.loads()`` first. If it is a :class:`~scrapy.settings.BaseSettings` instance, the per-key priorities will be used and the ``priority`` parameter ignored. This allows inserting/updating settings with different priorities with a single command. :param values: the settings names and values :type values: dict or string or :class:`~scrapy.settings.BaseSettings` :param priority: the priority of the settings. Should be a key of :attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer :type priority: string or int'
def update(self, values, priority='project'):
    self._assert_mutability()
    if isinstance(values, six.string_types):
        values = json.loads(values)
    if values is not None:
        if isinstance(values, BaseSettings):
            for name, value in six.iteritems(values):
                self.set(name, value, values.getpriority(name))
        else:
            for name, value in six.iteritems(values):
                self.set(name, value, priority)
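A sketch of how the per-key priorities interact with set() and update(); MY_SETTING is a hypothetical setting name, and the numeric priorities come from SETTINGS_PRIORITIES (default=0, project=20, spider=30, cmdline=40):

from scrapy.settings import BaseSettings

s = BaseSettings({'MY_SETTING': 'from defaults'}, priority='default')
s.set('MY_SETTING', 'from cmdline', priority='cmdline')
s.set('MY_SETTING', 'ignored', priority='project')            # lower priority: no effect
s.get('MY_SETTING')                                           # 'from cmdline'
s.getpriority('MY_SETTING')                                   # 40
s.update({'MY_SETTING': 'from spider'}, priority='spider')    # still lower than cmdline
s.get('MY_SETTING')                                           # 'from cmdline'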
'Make a deep copy of current settings. This method returns a new instance of the :class:`Settings` class, populated with the same values and their priorities. Modifications to the new object won\'t be reflected on the original settings.'
def copy(self):
return copy.deepcopy(self)
'Disable further changes to the current settings. After calling this method, the present state of the settings will become immutable. Trying to change values through the :meth:`~set` method and its variants won\'t be possible and will be alerted.'
def freeze(self):
self.frozen = True
'Return an immutable copy of the current settings. Alias for a :meth:`~freeze` call in the object returned by :meth:`copy`.'
def frozencopy(self):
    copy = self.copy()
    copy.freeze()
    return copy
'Make a copy of current settings and convert to a dict. This method returns a new dict populated with the same values and their priorities as the current settings. Modifications to the returned dict won\'t be reflected on the original settings. This method can be useful for example for printing settings in Scrapy shell.'
def copy_to_dict(self):
    settings = self.copy()
    return settings._to_dict()
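A small sketch of freezing and copying; BOT_NAME is a real built-in setting, the value is illustrative:

from scrapy.settings import Settings

s = Settings({'BOT_NAME': 'demo'})
frozen = s.frozencopy()
frozen.frozen                      # True
frozen.copy_to_dict()['BOT_NAME']  # 'demo'
# frozen.set('BOT_NAME', 'other')  # would raise TypeError: the settings are immutable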
'Log the given message at the given log level This helper wraps a log call to the logger within the spider, but you can use it directly (e.g. Spider.logger.info(\'msg\')) or use any other Python logger too.'
def log(self, message, level=logging.DEBUG, **kw):
self.logger.log(level, message, **kw)
'This method is deprecated.'
def make_requests_from_url(self, url):
return Request(url, dont_filter=True)
'This overridable method is called for each result (item or request) returned by the spider, and it\'s intended to perform any last-minute processing required before returning the results to the framework core, for example setting the item GUIDs. It receives a list of results and the response which originated those results. It must return a list of results (Items or Requests).'
def process_results(self, response, results):
return results
'You can override this function in order to make any changes you want to the feed before parsing it. This function must return a response.'
def adapt_response(self, response):
return response
'This method must be overridden with your custom spider functionality'
def parse_node(self, response, selector):
    if hasattr(self, 'parse_item'):
        return self.parse_item(response, selector)
    raise NotImplementedError
'This method is called for the nodes matching the provided tag name (itertag). Receives the response and a Selector for each node. Overriding this method is mandatory; otherwise, your spider won\'t work. This method must return either a BaseItem, a Request, or a list containing any of them.'
def parse_nodes(self, response, nodes):
    for selector in nodes:
        ret = iterate_spider_output(self.parse_node(response, selector))
        for result_item in self.process_results(response, ret):
            yield result_item
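A minimal XMLFeedSpider subclass sketch showing the parse_node hook described above; the spider name, feed URL and field names are assumptions:

from scrapy.spiders import XMLFeedSpider

class NewsFeedSpider(XMLFeedSpider):
    name = 'newsfeed'                                  # hypothetical
    start_urls = ['http://www.example.com/feed.xml']   # hypothetical
    iterator = 'iternodes'   # default iterator
    itertag = 'item'         # each matching node is passed to parse_node

    def parse_node(self, response, node):
        # one dict per <item> node in the feed
        yield {
            'title': node.xpath('title/text()').extract_first(),
            'link': node.xpath('link/text()').extract_first(),
        }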
'This method has the same purpose as the one in XMLFeedSpider'
def process_results(self, response, results):
return results
'This method has the same purpose as the one in XMLFeedSpider'
def adapt_response(self, response):
return response
'This method must be overridden with your custom spider functionality'
def parse_row(self, response, row):
raise NotImplementedError
'Receives a response and a dict (representing each row) with a key for each provided (or detected) header of the CSV file. This spider also gives the opportunity to override the adapt_response and process_results methods for pre- and post-processing purposes.'
def parse_rows(self, response):
    for row in csviter(response, self.delimiter, self.headers, self.quotechar):
        ret = iterate_spider_output(self.parse_row(response, row))
        for result_item in self.process_results(response, ret):
            yield result_item
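A matching CSVFeedSpider sketch for the row-based hooks above; the spider name, URL, delimiter and headers are assumptions:

from scrapy.spiders import CSVFeedSpider

class ProductsSpider(CSVFeedSpider):
    name = 'products'                                      # hypothetical
    start_urls = ['http://www.example.com/products.csv']   # hypothetical
    delimiter = ';'
    quotechar = "'"
    headers = ['id', 'name', 'price']

    def parse_row(self, response, row):
        # row is a dict keyed by the headers above
        yield {'id': row['id'], 'name': row['name'], 'price': row['price']}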
'Return the sitemap body contained in the given response, or None if the response is not a sitemap.'
def _get_sitemap_body(self, response):
    if isinstance(response, XmlResponse):
        return response.body
    elif gzip_magic_number(response):
        return gunzip(response.body)
    elif response.url.endswith('.xml') or response.url.endswith('.xml.gz'):
        return response.body
'This method must be set as the callback of your last initialization request. See self.init_request() docstring for more info.'
def initialized(self, response=None):
return self.__dict__.pop('_postinit_reqs')
'This function should return one initialization request, with the self.initialized method as callback. When the self.initialized method is called this spider is considered initialized. If you need to perform several requests for initializing your spider, you can do so by using different callbacks. The only requirement is that the final callback (of the last initialization request) must be self.initialized. The default implementation calls self.initialized immediately, and means that no initialization is needed. This method should be overridden only when you need to perform requests to initialize your spider'
def init_request(self):
return self.initialized()
'Normalize key to bytes'
def normkey(self, key):
return self._tobytes(key.title())
'Normalize values to bytes'
def normvalue(self, value):
    if value is None:
        value = []
    elif isinstance(value, (six.text_type, bytes)):
        value = [value]
    elif not hasattr(value, '__iter__'):
        value = [value]
    return [self._tobytes(x) for x in value]
'Return headers as a CaselessDict with unicode keys and unicode values. Multiple values are joined with \',\'.'
def to_unicode_dict(self):
    return CaselessDict(
        (to_unicode(key, encoding=self.encoding),
         to_unicode(','.join(value), encoding=self.encoding))
        for key, value in self.items())
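A quick sketch of the normalization above: header keys are caseless and values are stored as bytes, while to_unicode_dict() gives a text view:

from scrapy.http import Headers

h = Headers({'Content-Type': 'text/html'})
h['content-type']        # b'text/html' (caseless key lookup, value normalized to bytes)
h.appendlist('Accept', 'text/html')
h.appendlist('Accept', 'application/xml')
h.getlist('Accept')      # [b'text/html', b'application/xml']
h.to_unicode_dict()      # caseless mapping with str keys and ','-joined str values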
'Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965. It defaults to False. An unverifiable request is one whose URL the user did not have the option to approve. For example, if the request is for an image in an HTML document, and the user had no option to approve the automatic fetching of the image, this should be true.'
def is_unverifiable(self):
return self.request.meta.get('is_unverifiable', False)
'Return a copy of this Response'
def copy(self):
return self.replace()
'Create a new Response with the same attributes except for those given new values.'
def replace(self, *args, **kwargs):
    for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
        kwargs.setdefault(x, getattr(self, x))
    cls = kwargs.pop('cls', self.__class__)
    return cls(*args, **kwargs)
'Join this Response\'s url with a possible relative url to form an absolute interpretation of the latter.'
def urljoin(self, url):
return urljoin(self.url, url)
'For subclasses of TextResponse, this will return the body as text (unicode object in Python 2 and str in Python 3)'
@property
def text(self):
raise AttributeError("Response content isn't text")
'Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).'
def css(self, *a, **kw):
raise NotSupported("Response content isn't text")
'Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).'
def xpath(self, *a, **kw):
raise NotSupported("Response content isn't text")
'Return a :class:`~.Request` instance to follow a link ``url``. It accepts the same arguments as ``Request.__init__`` method, but ``url`` can be a relative URL or a ``scrapy.link.Link`` object, not only an absolute URL. :class:`~.TextResponse` provides a :meth:`~.TextResponse.follow` method which supports selectors in addition to absolute/relative URLs and Link objects.'
def follow(self, url, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding='utf-8', priority=0, dont_filter=False, errback=None):
    if isinstance(url, Link):
        url = url.url
    url = self.urljoin(url)
    return Request(url, callback,
                   method=method,
                   headers=headers,
                   body=body,
                   cookies=cookies,
                   meta=meta,
                   encoding=encoding,
                   priority=priority,
                   dont_filter=dont_filter,
                   errback=errback)
'Return body as unicode'
def body_as_unicode(self):
return self.text
'Body as unicode'
@property
def text(self):
    benc = self.encoding
    if self._cached_ubody is None:
        charset = 'charset=%s' % benc
        self._cached_ubody = html_to_unicode(charset, self.body)[1]
    return self._cached_ubody
'Join this Response\'s url with a possible relative url to form an absolute interpretation of the latter.'
def urljoin(self, url):
return urljoin(get_base_url(self), url)
'Return a :class:`~.Request` instance to follow a link ``url``. It accepts the same arguments as ``Request.__init__`` method, but ``url`` can be not only an absolute URL, but also * a relative URL; * a scrapy.link.Link object (e.g. a link extractor result); * an attribute Selector (not SelectorList) - e.g. ``response.css(\'a::attr(href)\')[0]`` or ``response.xpath(\'//img/@src\')[0]``. * a Selector for ``<a>`` or ``<link>`` element, e.g. ``response.css(\'a.my_link\')[0]``. See :ref:`response-follow-example` for usage examples.'
def follow(self, url, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding=None, priority=0, dont_filter=False, errback=None):
    if isinstance(url, parsel.Selector):
        url = _url_from_selector(url)
    elif isinstance(url, parsel.SelectorList):
        raise ValueError('SelectorList is not supported')
    encoding = self.encoding if encoding is None else encoding
    return super(TextResponse, self).follow(url, callback,
                                            method=method,
                                            headers=headers,
                                            body=body,
                                            cookies=cookies,
                                            meta=meta,
                                            encoding=encoding,
                                            priority=priority,
                                            dont_filter=dont_filter,
                                            errback=errback)
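A sketch of the selector-aware follow() inside a spider callback; the spider name, URL and CSS selector are assumptions:

from scrapy import Spider

class LinksSpider(Spider):
    name = 'links'                                # hypothetical
    start_urls = ['http://quotes.toscrape.com/']  # illustrative site

    def parse(self, response):
        # response.follow accepts a relative URL string, a Link,
        # an <a> Selector, or an attribute Selector
        for a in response.css('li.next a'):
            yield response.follow(a, callback=self.parse)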
'Return a copy of this Request'
def copy(self):
return self.replace()
'Create a new Request with the same attributes except for those given new values.'
def replace(self, *args, **kwargs):
    for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta',
              'encoding', 'priority', 'dont_filter', 'callback', 'errback']:
        kwargs.setdefault(x, getattr(self, x))
    cls = kwargs.pop('cls', self.__class__)
    return cls(*args, **kwargs)
'Send a notification mail with some additional useful info'
def _send_report(self, rcpts, subject):
    stats = self.crawler.stats
    s = 'Memory usage at engine startup : %dM\r\n' % (stats.get_value('memusage/startup') / 1024 / 1024)
    s += 'Maximum memory usage : %dM\r\n' % (stats.get_value('memusage/max') / 1024 / 1024)
    s += 'Current memory usage : %dM\r\n' % (self.get_virtual_size() / 1024 / 1024)
    s += 'ENGINE STATUS ------------------------------------------------------- \r\n'
    s += '\r\n'
    s += pformat(get_engine_status(self.crawler.engine))
    s += '\r\n'
    self.mail.send(rcpts, subject, s)
'Return response if present in cache, or None otherwise.'
def retrieve_response(self, spider, request):
    metadata = self._read_meta(spider, request)
    if metadata is None:
        return  # not cached
    rpath = self._get_request_path(spider, request)
    with self._open(os.path.join(rpath, 'response_body'), 'rb') as f:
        body = f.read()
    with self._open(os.path.join(rpath, 'response_headers'), 'rb') as f:
        rawheaders = f.read()
    url = metadata.get('response_url')
    status = metadata['status']
    headers = Headers(headers_raw_to_dict(rawheaders))
    respcls = responsetypes.from_args(headers=headers, url=url)
    response = respcls(url=url, headers=headers, status=status, body=body)
    return response
'Store the given response in the cache.'
def store_response(self, spider, request, response):
    rpath = self._get_request_path(spider, request)
    if not os.path.exists(rpath):
        os.makedirs(rpath)
    metadata = {
        'url': request.url,
        'method': request.method,
        'status': response.status,
        'response_url': response.url,
        'timestamp': time(),
    }
    with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
        f.write(to_bytes(repr(metadata)))
    with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
        pickle.dump(metadata, f, protocol=2)
    with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
        f.write(headers_dict_to_raw(response.headers))
    with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
        f.write(response.body)
    with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
        f.write(headers_dict_to_raw(request.headers))
    with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
        f.write(request.body)
'Define delay adjustment policy'
def _adjust_delay(self, slot, latency, response):
    # if the server needs `latency` seconds to respond, sending a request
    # every latency/N seconds keeps N requests in flight
    target_delay = latency / self.target_concurrency
    # move halfway toward the target delay
    new_delay = (slot.delay + target_delay) / 2.0
    # never go below the target delay
    new_delay = max(target_delay, new_delay)
    # clamp to the configured [mindelay, maxdelay] range
    new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
    # don't speed up on non-200 responses, which tend to be small and fast
    if response.status != 200 and new_delay <= slot.delay:
        return
    slot.delay = new_delay
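A standalone numeric sketch of the delay update above (plain function, not the Scrapy API), to make the formula concrete:

def adjust_delay(current_delay, latency, target_concurrency,
                 min_delay, max_delay, status=200):
    # mirrors the policy above: average toward latency/target_concurrency,
    # never below the target, clamped to [min_delay, max_delay],
    # and never faster on non-200 responses
    target_delay = latency / target_concurrency
    new_delay = (current_delay + target_delay) / 2.0
    new_delay = max(target_delay, new_delay)
    new_delay = min(max(min_delay, new_delay), max_delay)
    if status != 200 and new_delay <= current_delay:
        return current_delay
    return new_delay

# a 2 s latency at target concurrency 4 pulls a 1 s delay toward 0.5 s:
adjust_delay(1.0, latency=2.0, target_concurrency=4.0,
             min_delay=0.25, max_delay=10.0)   # -> 0.75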
'Connect a receiver function to a signal. The signal can be any object, although Scrapy comes with some predefined signals that are documented in the :ref:`topics-signals` section. :param receiver: the function to be connected :type receiver: callable :param signal: the signal to connect to :type signal: object'
def connect(self, receiver, signal, **kwargs):
    kwargs.setdefault('sender', self.sender)
    return dispatcher.connect(receiver, signal, **kwargs)
'Disconnect a receiver function from a signal. This has the opposite effect of the :meth:`connect` method, and the arguments are the same.'
def disconnect(self, receiver, signal, **kwargs):
    kwargs.setdefault('sender', self.sender)
    return dispatcher.disconnect(receiver, signal, **kwargs)
'Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth:`connect` method).'
def send_catch_log(self, signal, **kwargs):
    kwargs.setdefault('sender', self.sender)
    return _signal.send_catch_log(signal, **kwargs)
'Like :meth:`send_catch_log` but supports returning `deferreds`_ from signal handlers. Returns a Deferred that gets fired once all signal handlers deferreds were fired. Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth:`connect` method). .. _deferreds: http://twistedmatrix.com/documents/current/core/howto/defer.html'
def send_catch_log_deferred(self, signal, **kwargs):
    kwargs.setdefault('sender', self.sender)
    return _signal.send_catch_log_deferred(signal, **kwargs)
'Disconnect all receivers from the given signal. :param signal: the signal to disconnect from :type signal: object'
def disconnect_all(self, signal, **kwargs):
    kwargs.setdefault('sender', self.sender)
    return _signal.disconnect_all(signal, **kwargs)
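A typical hookup for the SignalManager API above, as used from a crawler extension; the extension class itself is hypothetical, while signals.spider_closed and crawler.signals are real Scrapy objects:

from scrapy import signals

class SpiderClosedLogger(object):        # hypothetical extension
    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        # crawler.signals is a SignalManager instance
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def spider_closed(self, spider, reason):
        spider.logger.info('spider %s closed (%s)', spider.name, reason)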
'Configure the exporter by popping options from the ``options`` dict. If dont_fail is set, it won\'t raise an exception on unexpected options (useful when used with keyword arguments in subclass constructors).'
def _configure(self, options, dont_fail=False):
    self.encoding = options.pop('encoding', None)
    self.fields_to_export = options.pop('fields_to_export', None)
    self.export_empty_fields = options.pop('export_empty_fields', False)
    self.indent = options.pop('indent', None)
    if not dont_fail and options:
        raise TypeError('Unexpected options: %s' % ', '.join(options.keys()))
'Return the fields to export as an iterable of tuples (name, serialized_value)'
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
    if include_empty is None:
        include_empty = self.export_empty_fields
    if self.fields_to_export is None:
        if include_empty and not isinstance(item, dict):
            field_iter = six.iterkeys(item.fields)
        else:
            field_iter = six.iterkeys(item)
    elif include_empty:
        field_iter = self.fields_to_export
    else:
        field_iter = (x for x in self.fields_to_export if x in item)
    for field_name in field_iter:
        if field_name in item:
            field = {} if isinstance(item, dict) else item.fields[field_name]
            value = self.serialize_field(field, field_name, item[field_name])
        else:
            value = default_value
        yield field_name, value
'Query value for the jmespath query and return answer :param value: a data structure (dict, list) to extract from :return: Element extracted according to jmespath query'
def __call__(self, value):
return self.compiled_path.search(value)
'Do the real extraction work'
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
    self.reset()
    self.feed(response_text)
    self.close()
    ret = []
    if base_url is None:
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
    for link in self.links:
        if isinstance(link.url, six.text_type):
            link.url = link.url.encode(response_encoding)
        try:
            link.url = urljoin(base_url, link.url)
        except ValueError:
            continue
        link.url = safe_url_string(link.url, response_encoding)
        link.text = to_unicode(link.text, response_encoding, errors='replace').strip()
        ret.append(link)
    return ret
'Normalize and filter extracted links. The subclass should override it if necessary.'
def _process_links(self, links):
return (unique_list(links, key=self.link_key) if self.unique else links)
'This extractor matches with any url, since it doesn\'t contain any patterns'
def matches(self, url):
return True
'Normalize and filter extracted links. The subclass should override it if necessary.'
def _process_links(self, links):
return self._deduplicate_if_needed(links)
'This extractor matches with any url, since it doesn\'t contain any patterns'
def matches(self, url):
return True
'Returns the last data value for this key, or [] if it\'s an empty list; raises KeyError if not found.'
def __getitem__(self, key):
    try:
        list_ = dict.__getitem__(self, key)
    except KeyError:
        raise MultiValueDictKeyError('Key %r not found in %r' % (key, self))
    try:
        return list_[-1]
    except IndexError:
        return []
'Returns the default value if the requested data doesn\'t exist'
def get(self, key, default=None):
    try:
        val = self[key]
    except KeyError:
        return default
    if val == []:
        return default
    return val
'Returns an empty list if the requested data doesn\'t exist'
def getlist(self, key):
    try:
        return dict.__getitem__(self, key)
    except KeyError:
        return []
'Appends an item to the internal list associated with key'
def appendlist(self, key, value):
    self.setlistdefault(key, [])
    dict.__setitem__(self, key, self.getlist(key) + [value])
'Returns a list of (key, value) pairs, where value is the last item in the list associated with the key.'
def items(self):
return [(key, self[key]) for key in self.keys()]
'Returns a list of (key, list) pairs.'
def lists(self):
return dict.items(self)
'Returns a list of the last value on every key list.'
def values(self):
return [self[key] for key in self.keys()]
'Returns a copy of this object.'
def copy(self):
return self.__deepcopy__()
'update() extends rather than replaces existing key lists. Also accepts keyword args.'
def update(self, *args, **kwargs):
    if len(args) > 1:
        raise TypeError('update expected at most 1 arguments, got %d' % len(args))
    if args:
        other_dict = args[0]
        if isinstance(other_dict, MultiValueDict):
            for key, value_list in other_dict.lists():
                self.setlistdefault(key, []).extend(value_list)
        else:
            try:
                for key, value in other_dict.items():
                    self.setlistdefault(key, []).append(value)
            except TypeError:
                raise ValueError('MultiValueDict.update() takes either a MultiValueDict or dictionary')
    for key, value in six.iteritems(kwargs):
        self.setlistdefault(key, []).append(value)
'Method to normalize dictionary key access'
def normkey(self, key):
return key.lower()
'Method to normalize values prior to being set'
def normvalue(self, value):
return value
'Returns a copy of this object.'
def copy(self):
return self.__copy__()
'Override this method to implement a different offsite policy'
def get_host_regex(self, spider):
    allowed_domains = getattr(spider, 'allowed_domains', None)
    if not allowed_domains:
        return re.compile('')  # allow all by default
    regex = '^(.*\\.)?(%s)$' % '|'.join(
        re.escape(d) for d in allowed_domains if d is not None)
    return re.compile(regex)
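A quick check of the pattern built above, for allowed_domains = ['example.com'] (illustrative domain):

import re

regex = re.compile('^(.*\\.)?(%s)$' % re.escape('example.com'))
bool(regex.search('example.com'))       # True
bool(regex.search('sub.example.com'))   # True
bool(regex.search('notexample.com'))    # False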
'https://www.w3.org/TR/referrer-policy/#strip-url If url is null, return no referrer. If url\'s scheme is a local scheme, then return no referrer. Set url\'s username to the empty string. Set url\'s password to null. Set url\'s fragment to null. If the origin-only flag is true, then: Set url\'s path to null. Set url\'s query to null. Return url.'
def strip_url(self, url, origin_only=False):
    if not url:
        return None
    return strip_url(url,
                     strip_credentials=True,
                     strip_fragment=True,
                     strip_default_port=True,
                     origin_only=origin_only)
'Return serialized origin (scheme, host, path) for a request or response URL.'
def origin(self, url):
return self.strip_url(url, origin_only=True)
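A hedged sketch of the stripping behaviour above, calling scrapy.utils.url.strip_url directly; the example URL is made up and the commented results are the expected outputs under those flags:

from scrapy.utils.url import strip_url

url = 'https://user:secret@www.example.com:443/path/page?q=1#frag'
strip_url(url, strip_credentials=True, strip_fragment=True,
          strip_default_port=True)                     # 'https://www.example.com/path/page?q=1'
strip_url(url, strip_credentials=True, strip_fragment=True,
          strip_default_port=True, origin_only=True)   # 'https://www.example.com/'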