Dataset columns: desc (string, 3 to 26.7k chars) · decl (string, 11 to 7.89k chars) · bodies (string, 8 to 553k chars)
'Determine the Referrer-Policy to use from a parent Response (or URL) and a Request to be sent: - if a valid policy is set in Request meta, it is used; - if the policy set in meta is invalid (e.g. a typo), the policy from settings is used; - if the policy is not set in Request meta but there is a Referrer-Policy header in the parent response, it is used if valid; - otherwise, the policy from settings is used.'
def policy(self, resp_or_url, request):
policy_name = request.meta.get('referrer_policy')
if policy_name is None:
    if isinstance(resp_or_url, Response):
        policy_header = resp_or_url.headers.get('Referrer-Policy')
        if policy_header is not None:
            policy_name = to_native_str(policy_header.decode('latin1'))
if policy_name is None:
    return self.default_policy()
cls = _load_policy_class(policy_name, warning_only=True)
return cls() if cls else self.default_policy()
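A short usage sketch of this resolution order (assuming a reasonably recent Scrapy; the policy class names in the comments are Scrapy's defaults):
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spidermiddlewares.referer import RefererMiddleware

mw = RefererMiddleware(Settings())
resp = Response('http://example.com/', headers={'Referrer-Policy': 'same-origin'})
# 1. a valid value in Request.meta wins over everything else
req = Request('http://example.com/page', meta={'referrer_policy': 'no-referrer'})
print(mw.policy(resp, req))  # a NoReferrerPolicy instance
# 2. without meta, the parent response's Referrer-Policy header is used
req = Request('http://example.com/page')
print(mw.policy(resp, req))  # a SameOriginPolicy instance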
'Return True if a page without hash fragment could be "AJAX crawlable" according to https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.'
def _has_ajax_crawlable_variant(self, response):
body = response.text[:self.lookup_bytes]
return _has_ajaxcrawlable_meta(body)
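The `_has_ajaxcrawlable_meta` helper is not shown here; a minimal sketch of what it plausibly checks, assuming the marker is Google's `<meta name="fragment" content="!">` tag:
import re

# Assumption: the real helper also ignores commented-out markup; this sketch
# only looks for the <meta name="fragment" content="!"> marker.
_fragment_meta_re = re.compile(
    r'<meta\s[^>]*name=["\']fragment["\'][^>]*content=["\']!["\']', re.I)

def _has_ajaxcrawlable_meta_sketch(text):
    return _fragment_meta_re.search(text) is not None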
':param prefix: string for setting keys :return: dictionary of image pipeline settings'
def _generate_fake_settings(self, prefix=None):
def random_string():
    # 97-122 covers lowercase ascii letters (the original upper bound of 123
    # could also produce '{')
    return ''.join([chr(random.randint(97, 122)) for _ in range(10)])
settings = {
    'IMAGES_EXPIRES': random.randint(100, 1000),
    'IMAGES_STORE': self.tempdir,
    'IMAGES_RESULT_FIELD': random_string(),
    'IMAGES_URLS_FIELD': random_string(),
    'IMAGES_MIN_WIDTH': random.randint(1, 1000),
    'IMAGES_MIN_HEIGHT': random.randint(1, 1000),
    'IMAGES_THUMBS': {
        'small': (random.randint(1, 1000), random.randint(1, 1000)),
        'big': (random.randint(1, 1000), random.randint(1, 1000)),
    },
}
if not prefix:
    return settings
return {(prefix.upper() + '_' + k if k != 'IMAGES_STORE' else k): v
        for k, v in settings.items()}
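For illustration, the prefixing rule above maps keys like this (hypothetical values; note that IMAGES_STORE is deliberately left unprefixed):
settings = {'IMAGES_EXPIRES': 583, 'IMAGES_STORE': '/tmp/imgs'}
prefix = 'userpipe'
prefixed = {(prefix.upper() + '_' + k if k != 'IMAGES_STORE' else k): v
            for k, v in settings.items()}
assert prefixed == {'USERPIPE_IMAGES_EXPIRES': 583, 'IMAGES_STORE': '/tmp/imgs'}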
':return: ImagePipeline class with all uppercase attributes set.'
def _generate_fake_pipeline_subclass(self):
class UserDefinedImagePipeline(ImagesPipeline):
    MIN_WIDTH = random.randint(1000, 2000)
    MIN_HEIGHT = random.randint(1000, 2000)
    THUMBS = {
        'small': (random.randint(1000, 2000), random.randint(1000, 2000)),
        'big': (random.randint(1000, 2000), random.randint(1000, 2000)),
    }
    EXPIRES = random.randint(1000, 2000)
    IMAGES_URLS_FIELD = 'field_one'
    IMAGES_RESULT_FIELD = 'field_two'
return UserDefinedImagePipeline
'If two instances of the ImagesPipeline class are created with different settings, each should keep its own settings.'
def test_different_settings_for_different_instances(self):
custom_settings = self._generate_fake_settings()
default_settings = Settings()
default_sts_pipe = ImagesPipeline(self.tempdir, settings=default_settings)
user_sts_pipe = ImagesPipeline.from_settings(Settings(custom_settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    expected_default_value = self.default_pipeline_settings.get(pipe_attr)
    custom_value = custom_settings.get(settings_attr)
    self.assertNotEqual(expected_default_value, custom_value)
    self.assertEqual(getattr(default_sts_pipe, pipe_attr.lower()), expected_default_value)
    self.assertEqual(getattr(user_sts_pipe, pipe_attr.lower()), custom_value)
'If image settings are not defined at all, a subclass of ImagesPipeline takes its values from class attributes.'
def test_subclass_attrs_preserved_default_settings(self):
pipeline_cls = self._generate_fake_pipeline_subclass()
pipeline = pipeline_cls.from_settings(Settings({'IMAGES_STORE': self.tempdir}))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    attr_value = getattr(pipeline, pipe_attr.lower())
    self.assertNotEqual(attr_value, self.default_pipeline_settings[pipe_attr])
    self.assertEqual(attr_value, getattr(pipeline, pipe_attr))
'If image settings are defined globally but not for the subclass, the values taken from settings should be preserved.'
def test_subclass_attrs_preserved_custom_settings(self):
pipeline_cls = self._generate_fake_pipeline_subclass()
settings = self._generate_fake_settings()
pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    value = getattr(pipeline, pipe_attr.lower())
    self.assertNotEqual(value, self.default_pipeline_settings[pipe_attr])
    settings_value = settings.get(settings_attr)
    self.assertEqual(value, settings_value)
'If there are no settings for subclass and no subclass attributes, pipeline should use attributes of base class.'
def test_no_custom_settings_for_subclasses(self):
class UserDefinedImagePipeline(ImagesPipeline):
    pass
user_pipeline = UserDefinedImagePipeline.from_settings(Settings({'IMAGES_STORE': self.tempdir}))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    custom_value = self.default_pipeline_settings.get(pipe_attr.upper())
    self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)
'If there are custom settings for subclass and NO class attributes, pipeline should use custom settings.'
def test_custom_settings_for_subclasses(self):
class UserDefinedImagePipeline(ImagesPipeline):
    pass
prefix = UserDefinedImagePipeline.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = UserDefinedImagePipeline.from_settings(Settings(settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    custom_value = settings.get(prefix + '_' + settings_attr)
    self.assertNotEqual(custom_value, self.default_pipeline_settings[pipe_attr])
    self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)
'If there are custom settings for a subclass AND class attributes, setting keys are preferred and override the attributes.'
def test_custom_settings_and_class_attrs_for_subclasses(self):
pipeline_cls = self._generate_fake_pipeline_subclass()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    custom_value = settings.get(prefix + '_' + settings_attr)
    self.assertNotEqual(custom_value, self.default_pipeline_settings[pipe_attr])
    self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)
'Test the situation where a user defines a subclass of ImagesPipeline but uses attribute names of the default pipeline (without prefixing them with the pipeline class name).'
def test_user_defined_subclass_default_key_names(self):
settings = self._generate_fake_settings()
class UserPipe(ImagesPipeline):
    pass
pipeline_cls = UserPipe.from_settings(Settings(settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
    expected_value = settings.get(settings_attr)
    self.assertEqual(getattr(pipeline_cls, pipe_attr.lower()), expected_value)
'Referer header is set by RefererMiddleware unless it is already set'
@defer.inlineCallbacks
def test_referer_header(self):
req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={'Referer': None})
req3 = req0.replace(headers={'Referer': 'http://example.com'})
req0.meta['next'] = req1
req1.meta['next'] = req2
req2.meta['next'] = req3
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed=req0)
self.assertIn('responses', crawler.spider.meta)
self.assertNotIn('failures', crawler.spider.meta)
# the seed request does not get a Referer header
echo0 = json.loads(to_unicode(crawler.spider.meta['responses'][0].body))
self.assertNotIn('Referer', echo0['headers'])
# the following request gets the previous URL as Referer
echo1 = json.loads(to_unicode(crawler.spider.meta['responses'][1].body))
self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
# an explicit Referer: None disables the header
echo2 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
self.assertNotIn('Referer', echo2['headers'])
# an explicitly set Referer is left untouched
echo3 = json.loads(to_unicode(crawler.spider.meta['responses'][3].body))
self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
'Test whether errors happening anywhere in Crawler.crawl() are properly reported (and not somehow swallowed) after a graceful engine shutdown. The errors should not come from within Scrapy\'s core but from within spiders/middlewares/etc., e.g. raised in Spider.start_requests(), SpiderMiddleware.process_start_requests(), etc.'
@defer.inlineCallbacks
def test_graceful_crawl_error_handling(self):
class TestError(Exception):
    pass
class FaultySpider(SimpleSpider):
    def start_requests(self):
        raise TestError
crawler = self.runner.create_crawler(FaultySpider)
yield self.assertFailure(crawler.crawl(), TestError)
self.assertFalse(crawler.crawling)
'Test that the resulting urls are str objects'
def test_urls_type(self):
lx = self.extractor_cls()
self.assertTrue(all(isinstance(link.url, str) for link in lx.extract_links(self.response)))
'Test the extractor\'s behaviour in different situations'
def test_extraction_using_single_values(self):
lx = self.extractor_cls(allow='sample')
self.assertEqual([link for link in lx.extract_links(self.response)], [
    Link(url='http://example.com/sample1.html', text=u''),
    Link(url='http://example.com/sample2.html', text=u'sample 2'),
    Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
    Link(url='http://example.com/sample3.html#foo', text='sample 3 repetition with fragment'),
])
lx = self.extractor_cls(allow='sample', deny='3')
self.assertEqual([link for link in lx.extract_links(self.response)], [
    Link(url='http://example.com/sample1.html', text=u''),
    Link(url='http://example.com/sample2.html', text=u'sample 2'),
])
lx = self.extractor_cls(allow_domains='google.com')
self.assertEqual([link for link in lx.extract_links(self.response)], [
    Link(url='http://www.google.com/something', text=u''),
])
lx = self.extractor_cls(deny_domains='example.com')
self.assertEqual([link for link in lx.extract_links(self.response)], [
    Link(url='http://www.google.com/something', text=u''),
])
'Test the extractor\'s behaviour for links with rel="nofollow"'
def test_nofollow(self):
html = """<html><head><title>Page title<title>
    <body>
    <div class='links'>
    <p><a href="/about.html">About us</a></p>
    </div>
    <div>
    <p><a href="/follow.html">Follow this link</a></p>
    </div>
    <div>
    <p><a href="/nofollow.html" rel="nofollow">Dont follow this one</a></p>
    </div>
    <div>
    <p><a href="/nofollow2.html" rel="blah">Choose to follow or not</a></p>
    </div>
    <div>
    <p><a href="http://google.com/something" rel="external nofollow">External link not to follow</a></p>
    </div>
    </body></html>"""
response = HtmlResponse('http://example.org/somepage/index.html', body=html)
lx = self.extractor_cls()
self.assertEqual(lx.extract_links(response), [
    Link(url='http://example.org/about.html', text=u'About us'),
    Link(url='http://example.org/follow.html', text=u'Follow this link'),
    Link(url='http://example.org/nofollow.html', text=u'Dont follow this one', nofollow=True),
    Link(url='http://example.org/nofollow2.html', text=u'Choose to follow or not'),
    Link(url='http://google.com/something', text=u'External link not to follow', nofollow=True),
])
'Test restrict_xpaths with encodings'
def test_restrict_xpaths_encoding(self):
html = """<html><head><title>Page title<title>
    <body><p><a href="item/12.html">Item 12</a></p>
    <div class='links'>
    <p><a href="/about.html">About us\xa3</a></p>
    </div>
    <div>
    <p><a href="/nofollow.html">This shouldn't be followed</a></p>
    </div>
    </body></html>"""
response = HtmlResponse('http://example.org/somepage/index.html', body=html, encoding='windows-1252')
lx = self.extractor_cls(restrict_xpaths="//div[@class='links']")
self.assertEqual(lx.extract_links(response), [
    Link(url='http://example.org/about.html', text=u'About us\xa3'),
])
'html entities cause SGMLParser to call handle_data hook twice'
def test_restrict_xpaths_concat_in_handle_data(self):
body = '<html><body><div><a href="/foo">&gt;\xbe\xa9&lt;\xb6\xab</a></body></html>'
response = HtmlResponse('http://example.org', body=body, encoding='gb18030')
lx = self.extractor_cls(restrict_xpaths='//div')
self.assertEqual(lx.extract_links(response), [
    Link(url='http://example.org/foo', text=u'>\u4eac<\u4e1c', fragment='', nofollow=False),
])
'Test the process_value callback for extracting link targets from javascript: hrefs'
def test_process_value(self):
html = """
<a href="javascript:goToPage('../other/page.html','photo','width=600,height=540,scrollbars'); return false">Link text</a>
<a href="/about.html">About us</a>
"""
response = HtmlResponse('http://example.org/somepage/index.html', body=html, encoding='windows-1252')
def process_value(value):
    m = re.search(r"javascript:goToPage\('(.*?)'", value)
    if m:
        return m.group(1)
lx = self.extractor_cls(process_value=process_value)
self.assertEqual(lx.extract_links(response), [
    Link(url='http://example.org/other/page.html', text='Link text'),
])
'If there are different instances with different settings, each should keep its own settings.'
def test_different_settings_for_different_instances(self):
custom_settings = self._generate_fake_settings()
another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
one_pipeline = FilesPipeline(self.tempdir)
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
    default_value = self.default_cls_settings[pipe_attr]
    self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
    custom_value = custom_settings[settings_attr]
    self.assertNotEqual(default_value, custom_value)
    self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
'If subclasses override class attributes and there are no special settings, those values should be kept.'
def test_subclass_attributes_preserved_if_no_settings(self):
pipe_cls = self._generate_fake_pipeline()
pipe = pipe_cls.from_settings(Settings({'FILES_STORE': self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
    custom_value = getattr(pipe, pipe_ins_attr)
    self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
    self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
'If file settings are defined globally but not for the subclass, the settings values should be preserved.'
def test_subclass_attrs_preserved_custom_settings(self):
pipeline_cls = self._generate_fake_pipeline()
settings = self._generate_fake_settings()
pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
    value = getattr(pipeline, pipe_ins_attr)
    setting_value = settings.get(settings_attr)
    self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
    self.assertEqual(value, setting_value)
'If there are no settings for subclass and no subclass attributes, pipeline should use attributes of base class.'
def test_no_custom_settings_for_subclasses(self):
class UserDefinedFilesPipeline(FilesPipeline):
    pass
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
    custom_value = self.default_cls_settings.get(pipe_attr.upper())
    self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
'If there are custom settings for subclass and NO class attributes, pipeline should use custom settings.'
def test_custom_settings_for_subclasses(self):
class UserDefinedFilesPipeline(FilesPipeline):
    pass
prefix = UserDefinedFilesPipeline.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
    custom_value = settings.get(prefix + '_' + settings_attr)
    self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
    self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
'If there are custom settings for a subclass AND class attributes, setting keys are preferred and override the attributes.'
def test_custom_settings_and_class_attrs_for_subclasses(self):
pipeline_cls = self._generate_fake_pipeline()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
    custom_value = settings.get(prefix + '_' + settings_attr)
    self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
    self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
'Test the situation where a user defines a subclass of FilesPipeline but uses attribute names of the default pipeline (without prefixing them with the pipeline class name).'
def test_user_defined_subclass_default_key_names(self):
settings = self._generate_fake_settings()
class UserPipe(FilesPipeline):
    pass
pipeline_cls = UserPipe.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
    expected_value = settings.get(settings_attr)
    self.assertEqual(getattr(pipeline_cls, pipe_inst_attr), expected_value)
'Constructor arguments are assigned to spider attributes'
def test_spider_args(self):
spider = self.spider_class('example.com', foo='bar')
self.assertEqual(spider.foo, 'bar')
'Instantiating a spider without a name raises ValueError'
def test_spider_without_name(self):
self.assertRaises(ValueError, self.spider_class)
self.assertRaises(ValueError, self.spider_class, somearg='foo')
'If a rule matches the URL, use its defined callback.'
@defer.inlineCallbacks
def test_crawlspider_matching_rule_callback_set(self):
status, out, stderr = yield self.execute(['--spider', 'goodcrawl' + self.spider_name, '-r', self.url('/html')])
self.assertIn("[{}, {'foo': 'bar'}]", to_native_str(out))
'If a rule matches but has no callback set, use the \'parse\' callback.'
@defer.inlineCallbacks
def test_crawlspider_matching_rule_default_callback(self):
status, out, stderr = yield self.execute(['--spider', 'goodcrawl' + self.spider_name, '-r', self.url('/text')])
self.assertIn("[{}, {'nomatch': 'default'}]", to_native_str(out))
'Using -r with a spider with no rule should not produce items.'
@defer.inlineCallbacks
def test_spider_with_no_rules_attribute(self):
status, out, stderr = yield self.execute(['--spider', self.spider_name, '-r', self.url('/html')])
self.assertRegexpMatches(to_native_str(out), r'# Scraped Items -+\n\[\]')
self.assertIn('No CrawlSpider rules found', to_native_str(stderr))
'The requested URL has no matching rule, so no items should be scraped'
@defer.inlineCallbacks
def test_crawlspider_no_matching_rule(self):
status, out, stderr = yield self.execute(['--spider', 'badcrawl' + self.spider_name, '-r', self.url('/enc-gb18030')])
self.assertRegexpMatches(to_native_str(out), r'# Scraped Items -+\n\[\]')
self.assertIn('Cannot find a rule that matches', to_native_str(stderr))
'L{client._parse} should return C{str} for the scheme, host, and path elements of its return tuple, even when passed an URL which has previously been passed to L{urlparse} as a C{unicode} string.'
def test_externalUnicodeInterference(self):
if not six.PY2:
    raise unittest.SkipTest('Applies only to Py2, as urls can be ONLY unicode on Py3')
badInput = u'http://example.com/path'
goodInput = badInput.encode('ascii')
self._parse(badInput)
scheme, netloc, host, port, path = self._parse(goodInput)
self.assertTrue(isinstance(scheme, str))
self.assertTrue(isinstance(netloc, str))
self.assertTrue(isinstance(host, str))
self.assertTrue(isinstance(path, str))
self.assertTrue(isinstance(port, int))
'L{client.getPage} returns a L{Deferred} which is called back with the body of the response if the default method B{GET} is used.'
def test_getPage(self):
d = getPage(self.getURL('file'))
d.addCallback(self.assertEqual, '0123456789')
return d
'L{client.getPage} returns a L{Deferred} which is called back with the empty string if the method is C{HEAD} and there is a successful response code.'
def test_getPageHead(self):
def _getPage(method):
    return getPage(self.getURL('file'), method=method)
return defer.gatherResults([
    _getPage('head').addCallback(self.assertEqual, ''),
    _getPage('HEAD').addCallback(self.assertEqual, ''),
])
'When a non-zero timeout is passed to L{getPage} and the page is retrieved before the timeout period elapses, the L{Deferred} is called back with the contents of the page.'
def test_timeoutNotTriggering(self):
d = getPage(self.getURL('host'), timeout=100)
d.addCallback(self.assertEqual, to_bytes('127.0.0.1:%d' % self.portno))
return d
'When a non-zero timeout is passed to L{getPage} and that many seconds elapse before the server responds to the request, the L{Deferred} is errbacked with a L{error.TimeoutError}.'
def test_timeoutTriggering(self):
finished = self.assertFailure(getPage(self.getURL('wait'), timeout=1e-06), defer.TimeoutError)
def cleanup(passthrough):
    connected = list(six.iterkeys(self.wrapper.protocols))
    if connected:
        connected[0].transport.loseConnection()
    return passthrough
finished.addBoth(cleanup)
return finished
'Test that a non-standard body encoding matches the Content-Encoding header'
def test_encoding(self):
body = '\xd0\x81\xd1\x8e\xd0\xaf'
d = getPage(self.getURL('encoding'), body=body, response_transform=lambda r: r)
return d.addCallback(self._check_Encoding, body)
'Example taken from: http://en.wikipedia.org/wiki/Chunked_transfer_encoding'
def test_decode_chunked_transfer(self):
chunked_body = '25\r\n' + 'This is the data in the first chunk\r\n\r\n'
chunked_body += '1C\r\n' + 'and this is the second one\r\n\r\n'
chunked_body += '3\r\n' + 'con\r\n'
chunked_body += '8\r\n' + 'sequence\r\n'
chunked_body += '0\r\n\r\n'
body = decode_chunked_transfer(chunked_body)
self.assertEqual(body, 'This is the data in the first chunk\r\n'
                       + 'and this is the second one\r\n'
                       + 'consequence')
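For reference, a minimal sketch of how such a decoder can work (not Scrapy's actual implementation): each chunk is a hex size line, CRLF, the payload, and a trailing CRLF, with a zero-size chunk terminating the body:
def decode_chunked_transfer_sketch(chunked_body):
    body, rest = '', chunked_body
    while rest:
        size_line, rest = rest.split('\r\n', 1)
        size = int(size_line.strip(), 16)
        if size == 0:  # terminating chunk
            break
        body += rest[:size]
        rest = rest[size + 2:]  # skip the payload and its trailing CRLF
    return body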
'Test that a gzip Content-Encoded .gz file is gunzipped only once by the middleware, leaving gunzipping of the file to upper layers.'
def test_process_response_gzipped_gzip_file(self):
headers = {'Content-Type': 'application/gzip', 'Content-Encoding': 'gzip'}
f = BytesIO()
plainbody = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
  <url>
    <loc>http://www.example.com/</loc>
    <lastmod>2009-08-16</lastmod>
    <changefreq>daily</changefreq>
    <priority>1</priority>
  </url>
  <url>
    <loc>http://www.example.com/Special-Offers.html</loc>
    <lastmod>2009-08-16</lastmod>
    <changefreq>weekly</changefreq>
    <priority>0.8</priority>
  </url>
</urlset>"""
gz_file = GzipFile(fileobj=f, mode='wb')
gz_file.write(plainbody)
gz_file.close()
r = BytesIO()
gz_resp = GzipFile(fileobj=r, mode='wb')
gz_resp.write(f.getvalue())
gz_resp.close()
response = Response('http://www.example.com/', headers=headers, body=r.getvalue())
request = Request('http://www.example.com/')
newresponse = self.mw.process_response(request, response, self.spider)
self.assertEqual(gunzip(newresponse.body), plainbody)
'Test Response copy'
def test_copy(self):
r1 = self.response_class('http://www.example.com', body='Some body')
r1.flags.append('cached')
r2 = r1.copy()
self.assertEqual(r1.status, r2.status)
self.assertEqual(r1.body, r2.body)
assert r1.flags is not r2.flags, 'flags must be a shallow copy, not identical'
self.assertEqual(r1.flags, r2.flags)
assert r1.headers is not r2.headers, 'headers must be a shallow copy, not identical'
self.assertEqual(r1.headers, r2.headers)
'Test Response children copies preserve their class'
def test_copy_inherited_classes(self):
class CustomResponse(self.response_class):
    pass
r1 = CustomResponse('http://www.example.com')
r2 = r1.copy()
assert type(r2) is CustomResponse
'Test Response.replace() method'
def test_replace(self):
hdrs = Headers({'key': 'value'})
r1 = self.response_class('http://www.example.com')
r2 = r1.replace(status=301, body='New body', headers=hdrs)
assert r1.body == ''
self.assertEqual(r1.url, r2.url)
self.assertEqual((r1.status, r2.status), (200, 301))
self.assertEqual((r1.body, r2.body), ('', 'New body'))
self.assertEqual((r1.headers, r2.headers), ({}, hdrs))
r3 = self.response_class('http://www.example.com', flags=['cached'])
r4 = r3.replace(body='', flags=[])
self.assertEqual(r4.body, '')
self.assertEqual(r4.flags, [])
'Test urljoin shortcut (only for existence, since behavior equals urljoin)'
def test_urljoin(self):
joined = self.response_class('http://www.example.com').urljoin('/test')
absolute = 'http://www.example.com/test'
self.assertEqual(joined, absolute)
'Check that unknown declared encodings are ignored'
def test_declared_encoding_invalid(self):
r = self.response_class('http://www.example.com',
                        headers={'Content-type': ['text/html; charset=UKNOWN']},
                        body='\xc2\xa3')
self.assertEqual(r._declared_encoding(), None)
self._assert_response_values(r, 'utf-8', u'\xa3')
'Test utf-16 because UnicodeDammit is known to have problems with it'
def test_utf16(self):
r = self.response_class('http://www.example.com', body='\xff\xfeh\x00i\x00', encoding='utf-16')
self._assert_response_values(r, 'utf-16', u'hi')
'Test invalid chars are replaced properly'
def test_replace_wrong_encoding(self):
r = self.response_class('http://www.example.com', encoding='utf-8', body='PREFIX\xe3\xabSUFFIX')
assert u'\ufffd' in r.text, repr(r.text)
assert u'PREFIX' in r.text, repr(r.text)
assert u'SUFFIX' in r.text, repr(r.text)
r = self.response_class('http://example.com', encoding='utf-8', body='\xf0<span>value</span>')
assert u'<span>value</span>' in r.text, repr(r.text)
'Test urljoin shortcut which also evaluates base-url through get_base_url().'
def test_urljoin_with_base_url(self):
body = '<html><body><base href="https://example.net"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('/test')
absolute = 'https://example.net/test'
self.assertEqual(joined, absolute)
body = '<html><body><base href="/elsewhere"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('test')
absolute = 'http://www.example.com/test'
self.assertEqual(joined, absolute)
body = '<html><body><base href="/elsewhere/"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('test')
absolute = 'http://www.example.com/elsewhere/test'
self.assertEqual(joined, absolute)
'Run spider with specified settings; return exported data.'
@defer.inlineCallbacks
def run_and_export(self, spider_cls, settings=None):
tmpdir = tempfile.mkdtemp()
res_name = tmpdir + '/res'
defaults = {'FEED_URI': 'file://' + res_name, 'FEED_FORMAT': 'csv'}
defaults.update(settings or {})
try:
    with MockServer() as s:
        runner = CrawlerRunner(Settings(defaults))
        yield runner.crawl(spider_cls)
    with open(res_name, 'rb') as f:
        defer.returnValue(f.read())
finally:
    shutil.rmtree(tmpdir)
'Return exported data which a spider yielding ``items`` would return.'
@defer.inlineCallbacks
def exported_data(self, items, settings):
class TestSpider(scrapy.Spider):
    name = 'testspider'
    start_urls = ['http://localhost:8998/']
    def parse(self, response):
        for item in items:
            yield item
data = yield self.run_and_export(TestSpider, settings)
defer.returnValue(data)
'Return exported data which a spider yielding no ``items`` would return.'
@defer.inlineCallbacks
def exported_no_data(self, settings):
class TestSpider(scrapy.Spider):
    name = 'testspider'
    start_urls = ['http://localhost:8998/']
    def parse(self, response):
        pass
data = yield self.run_and_export(TestSpider, settings)
defer.returnValue(data)
'Assert we can deal with trailing spaces inside <loc> tags - we\'ve seen those'
def test_sitemap_strip(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
  <url>
    <loc> http://www.example.com/</loc>
    <lastmod>2009-08-16</lastmod>
    <changefreq>daily</changefreq>
    <priority>1</priority>
  </url>
  <url>
    <loc> http://www.example.com/2</loc>
    <lastmod />
  </url>
</urlset>
""")
self.assertEqual(list(s), [
    {'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
    {'loc': 'http://www.example.com/2', 'lastmod': ''},
])
'We have seen sitemaps with wrong namespaces. Presumably, Google still works with these, though this is not 100% confirmed'
def test_sitemap_wrong_ns(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
  <url xmlns="">
    <loc> http://www.example.com/</loc>
    <lastmod>2009-08-16</lastmod>
    <changefreq>daily</changefreq>
    <priority>1</priority>
  </url>
  <url xmlns="">
    <loc> http://www.example.com/2</loc>
    <lastmod />
  </url>
</urlset>
""")
self.assertEqual(list(s), [
    {'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
    {'loc': 'http://www.example.com/2', 'lastmod': ''},
])
'We have seen sitemaps with wrong namespaces. Presumably, Google still works with these, though this is not 100% confirmed'
def test_sitemap_wrong_ns2(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset>
  <url xmlns="">
    <loc> http://www.example.com/</loc>
    <lastmod>2009-08-16</lastmod>
    <changefreq>daily</changefreq>
    <priority>1</priority>
  </url>
  <url xmlns="">
    <loc> http://www.example.com/2</loc>
    <lastmod />
  </url>
</urlset>
""")
assert s.type == 'urlset'
self.assertEqual(list(s), [
    {'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
    {'loc': 'http://www.example.com/2', 'lastmod': ''},
])
'Assert we can deal with blank lines before the <?xml?> declaration'
def test_sitemap_blanklines(self):
s = Sitemap("""
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">

<!-- cache: cached = yes name = sitemap_jspCache key = sitemap -->
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>

<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>

<sitemap>
<loc>http://www.example.com/sitemap3.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>

<!-- end cache -->
</sitemapindex>
""")
self.assertEqual(list(s), [
    {'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap1.xml'},
    {'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap2.xml'},
    {'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap3.xml'},
])
'method which returns request @url http://scrapy.org @returns requests 1'
def returns_request(self, response):
return Request('http://scrapy.org', callback=self.returns_item)
'method which returns item @url http://scrapy.org @returns items 1 1'
def returns_item(self, response):
return TestItem(url=response.url)
'method which returns item @url http://scrapy.org @returns items 1 1'
def returns_dict_item(self, response):
return {'url': response.url}
'method which returns item @url http://scrapy.org @returns items 0 0'
def returns_fail(self, response):
return TestItem(url=response.url)
'method which returns item @url http://scrapy.org @returns items 0 0'
def returns_dict_fail(self, response):
return {'url': response.url}
'returns item with name and url @url http://scrapy.org @returns items 1 1 @scrapes name url'
def scrapes_item_ok(self, response):
return TestItem(name='test', url=response.url)
'returns item with name and url @url http://scrapy.org @returns items 1 1 @scrapes name url'
def scrapes_dict_item_ok(self, response):
return {'name': 'test', 'url': response.url}
'returns item with no name @url http://scrapy.org @returns items 1 1 @scrapes name url'
def scrapes_item_fail(self, response):
return TestItem(url=response.url)
'returns item with no name @url http://scrapy.org @returns items 1 1 @scrapes name url'
def scrapes_dict_item_fail(self, response):
return {'url': response.url}
'method with no url @returns items 1 1'
def parse_no_url(self, response):
pass
'Test that customizing the request_fingerprint method changes the output of request_seen.'
def test_request_fingerprint(self):
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/INDEX.html')
dupefilter = RFPDupeFilter()
dupefilter.open()
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close('finished')
class CaseInsensitiveRFPDupeFilter(RFPDupeFilter):
    def request_fingerprint(self, request):
        fp = hashlib.sha1()
        fp.update(to_bytes(request.url.lower()))
        return fp.hexdigest()
case_insensitive_dupefilter = CaseInsensitiveRFPDupeFilter()
case_insensitive_dupefilter.open()
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close('finished')
'Anything that is not in the supplied sequence will evaluate as \'in\' the container.'
def test_set(self):
seq = set([-3, 'test', 1.1])
d = SequenceExclude(seq)
self.assertIn(0, d)
self.assertIn('foo', d)
self.assertIn(3.14, d)
self.assertIn(set('bar'), d)
# unhashable values cannot be tested against the underlying set
self.assertRaises(TypeError, d.__contains__, [0, 1, 2])
self.assertRaises(TypeError, d.__contains__, ['a', 'b', 'c'])
for v in [-3, 'test', 1.1]:
    self.assertNotIn(v, d)
'Simple selector tests'
def test_simple_selection(self):
body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>" response = TextResponse(url='http://example.com', body=body, encoding='utf-8') sel = Selector(response) xl = sel.xpath('//input') self.assertEqual(2, len(xl)) for x in xl: assert isinstance(x, Selector) self.assertEqual(sel.xpath('//input').extract(), [x.extract() for x in sel.xpath('//input')]) self.assertEqual([x.extract() for x in sel.xpath("//input[@name='a']/@name")], [u'a']) self.assertEqual([x.extract() for x in sel.xpath("number(concat(//input[@name='a']/@value, //input[@name='b']/@value))")], [u'12.0']) self.assertEqual(sel.xpath("concat('xpath', 'rules')").extract(), [u'xpathrules']) self.assertEqual([x.extract() for x in sel.xpath("concat(//input[@name='a']/@value, //input[@name='b']/@value)")], [u'12'])
'Check that classes are using slots and are weak-referenceable'
def test_weakref_slots(self):
x = Selector(text='')
weakref.ref(x)
assert not hasattr(x, '__dict__'), '%s does not use __slots__' % x.__class__.__name__
'Executes the downloader middleware manager\'s download method and returns the result (Request or Response), or raises an exception in case of failure.'
def _download(self, request, response=None):
if not response:
    response = Response(request.url)
def download_func(**kwargs):
    return response
dfd = self.mwman.download(download_func, request, self.spider)
results = []
dfd.addBoth(results.append)
self._wait(dfd)
ret = results[0]
if isinstance(ret, Failure):
    ret.raiseException()
return ret
'Regression test for a failure when redirecting a compressed request. This happens when the httpcompression middleware is executed before the redirect middleware and attempts to decompress a non-compressed body. In particular, when a website returns a 30x response with a \'Content-Encoding: gzip\' header, the result is the error below: exceptions.IOError: Not a gzipped file'
def test_3xx_and_invalid_gzipped_body_must_redirect(self):
req = Request('http://example.com')
body = '<p>You are being redirected</p>'
resp = Response(req.url, status=302, body=body, headers={
    'Content-Length': str(len(body)),
    'Content-Type': 'text/html',
    'Content-Encoding': 'gzip',
    'Location': 'http://example.com/login',
})
ret = self._download(request=req, response=resp)
self.assertTrue(isinstance(ret, Request), 'Not redirected: {0!r}'.format(ret))
self.assertEqual(to_bytes(ret.url), resp.headers['Location'], 'Not redirected to location header')
'Record a signal and its parameters'
def record_signal(self, *args, **kwargs):
signalargs = kwargs.copy()
sig = signalargs.pop('signal')
signalargs.pop('sender', None)
self.signals_catched[sig] = signalargs
'Tests if "Content-Length: 0" is sent for bodyless POST requests. This is not strictly required by HTTP RFCs but can cause trouble for some web servers. See: https://github.com/scrapy/scrapy/issues/823 https://issues.apache.org/jira/browse/TS-2902 https://github.com/kennethreitz/requests/issues/405 https://bugs.python.org/issue14721'
def test_content_length_zero_bodyless_post_request_headers(self):
def _test(response):
    self.assertEqual(response.body, '0')
request = Request(self.getURL('contentlength'), method='POST', headers={'Host': 'example.com'})
return self.download_request(request, Spider('foo')).addCallback(_test)
'Tests that the correct response type is chosen when the Content-Type header is empty but the body contains text.'
def test_response_class_choosing_request(self):
body = 'Some plain text\ndata with tabs\tand null bytes\x00'
def _test_type(response):
    self.assertEqual(type(response), TextResponse)
request = Request(self.getURL('nocontenttype'), body=body)
d = self.download_request(request, Spider('foo'))
d.addCallback(_test_type)
return d
'Test TunnelingTCP4ClientEndpoint'
@defer.inlineCallbacks
def test_download_with_proxy_https_timeout(self):
http_proxy = self.getURL('')
domain = 'https://no-such-domain.nosuch'
request = Request(domain, meta={'proxy': http_proxy, 'download_timeout': 0.2})
d = self.download_request(request, Spider('foo'))
timeout = yield self.assertFailure(d, error.TimeoutError)
self.assertIn(domain, timeout.osError)
'Test that calling `fetch(url)` follows HTTP redirects by default.'
@defer.inlineCallbacks
def test_fetch_redirect_follow_302(self):
url = self.url('/redirect-no-meta-refresh')
code = "fetch('{0}')"
errcode, out, errout = yield self.execute(['-c', code.format(url)])
self.assertEqual(errcode, 0, out)
assert 'Redirecting (302)' in errout
assert 'Crawled (200)' in errout
'Test that calling `fetch(url, redirect=False)` disables automatic redirects.'
@defer.inlineCallbacks
def test_fetch_redirect_not_follow_302(self):
url = self.url('/redirect-no-meta-refresh')
code = "fetch('{0}', redirect=False)"
errcode, out, errout = yield self.execute(['-c', code.format(url)])
self.assertEqual(errcode, 0, out)
assert 'Crawled (302)' in errout
'Test Request copy'
def test_copy(self):
def somecallback():
    pass
r1 = self.request_class('http://www.example.com', callback=somecallback, errback=somecallback)
r1.meta['foo'] = 'bar'
r2 = r1.copy()
assert r1.callback is somecallback
assert r1.errback is somecallback
assert r2.callback is r1.callback
assert r2.errback is r1.errback
assert r1.meta is not r2.meta, 'meta must be a shallow copy, not identical'
self.assertEqual(r1.meta, r2.meta)
assert r1.headers is not r2.headers, 'headers must be a shallow copy, not identical'
self.assertEqual(r1.headers, r2.headers)
self.assertEqual(r1.encoding, r2.encoding)
self.assertEqual(r1.dont_filter, r2.dont_filter)
'Test Request children copies preserve their class'
def test_copy_inherited_classes(self):
class CustomRequest(self.request_class):
    pass
r1 = CustomRequest('http://www.example.com')
r2 = r1.copy()
assert type(r2) is CustomRequest
'Test Request.replace() method'
def test_replace(self):
r1 = self.request_class('http://www.example.com', method='GET')
hdrs = Headers(r1.headers)
hdrs['key'] = 'value'
r2 = r1.replace(method='POST', body='New body', headers=hdrs)
self.assertEqual(r1.url, r2.url)
self.assertEqual((r1.method, r2.method), ('GET', 'POST'))
self.assertEqual((r1.body, r2.body), ('', 'New body'))
self.assertEqual((r1.headers, r2.headers), (self.default_headers, hdrs))
r3 = self.request_class('http://www.example.com', meta={'a': 1}, dont_filter=True)
r4 = r3.replace(url='http://www.example.com/2', body='', meta={}, dont_filter=False)
self.assertEqual(r4.url, 'http://www.example.com/2')
self.assertEqual(r4.body, '')
self.assertEqual(r4.meta, {})
assert r4.dont_filter is False
'ipdata (Maxmind, thyme.apnic.net) functions'
def test_data(self):
res, out, _ = RUN(['ivre', 'ipdata', '8.8.8.8'])
self.assertEqual(res, 0)
self.assertEqual(out, '8.8.8.8\n')
res = RUN(['ivre', 'ipdata', '--init'], stdin=open(os.devnull))[0]
self.assertEqual(res, 0)
res, out, _ = RUN(['ivre', 'ipdata', '8.8.8.8'])
self.assertEqual(res, 0)
self.assertEqual(out, '8.8.8.8\n')
res = RUN(['ivre', 'ipdata', '--download'])[0]
self.assertEqual(res, 0)
res = RUN(['ivre', 'ipdata', '--import-all', '--no-update-passive-db'])[0]
self.assertEqual(res, 0)
res, out, _ = RUN(['ivre', 'ipdata', '8.8.8.8'])
self.assertEqual(res, 0)
out = sorted((' coordinates (37.751, -97.822)'
              if x == ' coordinates (37.750999999999998, -97.822000000000003)'
              else x)
             for x in out.splitlines())
self.assertEqual(out, sorted('8.8.8.8\n as_num 15169\n as_name Google Inc.\n coordinates (37.751, -97.822)\n country_code US\n country_name United States\n'.splitlines()))
res, out, _ = RUN(['ivre', 'runscans', '--output', 'Count', '--asnum', '15169'])
self.assertEqual(res, 0)
self.assertEqual(out, 'AS15169 has 2685951 IPs.\n')
res, out, _ = RUN(['ivre', 'runscans', '--output', 'Count', '--country', 'US'])
self.assertEqual(res, 0)
self.assertEqual(out, 'US has 1595080627 IPs.\n')
res, out, _ = RUN(['ivre', 'runscans', '--output', 'List', '--country', 'A2'])
self.assertEqual(res, 0)
self.assertEqual(out, '5.145.149.142 - 5.145.149.142\n57.72.6.0 - 57.72.6.255\n62.56.206.0 - 62.56.206.255\n62.128.160.0 - 62.128.160.255\n62.128.167.0 - 62.128.167.255\n62.145.35.0 - 62.145.35.255\n77.220.0.0 - 77.220.7.255\n78.41.29.0 - 78.41.29.255\n78.41.227.0 - 78.41.227.255\n80.78.16.152 - 80.78.16.167\n80.78.16.192 - 80.78.16.207\n80.78.16.224 - 80.78.16.224\n80.78.19.57 - 80.78.19.63\n80.78.19.233 - 80.78.19.239\n80.231.5.0 - 80.231.5.255\n82.206.239.0 - 82.206.239.255\n83.229.22.0 - 83.229.22.255\n84.22.67.0 - 84.22.67.255\n86.62.5.0 - 86.62.5.255\n86.62.30.0 - 86.62.30.255\n87.234.247.0 - 87.234.247.255\n93.93.101.96 - 93.93.101.127\n93.93.102.96 - 93.93.102.127\n111.90.150.0 - 111.90.150.255\n185.38.108.0 - 185.38.108.255\n196.15.8.0 - 196.15.8.255\n196.15.10.0 - 196.15.11.255\n196.47.77.0 - 196.47.78.255\n196.201.132.0 - 196.201.132.255\n196.201.135.0 - 196.201.135.255\n196.201.148.0 - 196.201.148.255\n199.190.44.0 - 199.190.47.255\n213.193.49.0 - 213.193.49.255\n216.147.155.0 - 216.147.155.255\n217.30.26.0 - 217.30.26.255\n217.175.75.0 - 217.175.75.255\n')
res, out, _ = RUN(['ivre', 'runscans', '--output', 'ListCIDRs', '--country', 'A1'])
self.assertEqual(res, 0)
self.assertEqual(out, '31.14.133.39/32\n37.221.172.0/23\n46.19.137.0/24\n46.19.143.0/24\n50.7.78.88/31\n62.73.8.0/23\n63.235.155.210/32\n64.12.118.23/32\n64.12.118.88/32\n67.43.156.0/24\n69.10.139.0/24\n70.232.245.0/24\n74.82.9.224/32\n80.254.74.0/23\n93.115.82.0/23\n93.115.84.0/23\n96.47.226.20/32\n147.203.120.0/24\n176.9.75.43/32\n185.36.100.145/32\n192.238.21.0/24\n193.107.17.71/32\n193.200.150.0/24\n198.144.105.88/32\n199.114.223.0/24\n199.188.236.0/23\n200.200.200.200/32\n206.71.162.0/24\n206.196.103.0/24\n208.43.225.52/32\n209.216.198.0/24\n213.234.249.115/32\n216.151.180.0/24\n')
res, out1, _ = RUN(['ivre', 'runscans', '--output', 'ListAll', '--country', 'A1'])
self.assertEqual(res, 0)
res, out2, _ = RUN(['ivre', 'runscans', '--output', 'ListAllRand', '--country', 'A1'])
self.assertEqual(res, 0)
out1, out2 = out1.split('\n'), out2.split('\n')
self.assertGreater(len(out1), 0)
self.assertItemsEqual(out1, out2)
res, out1, _ = RUN(['ivre', 'runscans', '--output', 'ListAll', '--region', 'GP', 'R5'])
self.assertEqual(res, 0)
res, out2, _ = RUN(['ivre', 'runscans', '--output', 'ListAllRand', '--region', 'GP', 'R5'])
self.assertEqual(res, 0)
out1, out2 = out1.split('\n'), out2.split('\n')
self.assertGreater(len(out1), 0)
self.assertItemsEqual(out1, out2)
res, out1, _ = RUN(['ivre', 'runscans', '--output', 'ListAll', '--city', 'FR', 'Carcassonne'])
self.assertEqual(res, 0)
res, out2, _ = RUN(['ivre', 'runscans', '--output', 'ListAllRand', '--city', 'FR', 'Carcassonne'])
self.assertEqual(res, 0)
out1, out2 = out1.split('\n'), out2.split('\n')
self.assertGreater(len(out1), 0)
self.assertItemsEqual(out1, out2)
res, out1, _ = RUN(['ivre', 'runscans', '--output', 'ListAll', '--asnum', '12345'])
self.assertEqual(res, 0)
res, out2, _ = RUN(['ivre', 'runscans', '--output', 'ListAllRand', '--asnum', '12345'])
self.assertEqual(res, 0)
out1, out2 = out1.split('\n'), out2.split('\n')
self.assertGreater(len(out1), 0)
self.assertItemsEqual(out1, out2)
res = RUN(['ivre', 'ipdata', '--init'], stdin=open(os.devnull))[0]
self.assertEqual(res, 0)
'Functions that have not yet been tested'
def test_utils(self):
self.assertIsNotNone(ivre.config.guess_prefix())
self.assertIsNone(ivre.config.guess_prefix('inexistant'))
res, out1, err = RUN(['ivre'])
self.assertEqual(res, 0)
self.assertTrue(not err)
res, out2, err = RUN(['ivre', 'help'])
self.assertEqual(res, 0)
self.assertTrue(not err)
self.assertEqual(out1, out2)
res, _, err = RUN(['ivre', 'version'])
self.assertEqual(res, 0)
self.assertTrue(not err)
res, _, _ = RUN(['ivre', 'inexistant'])
self.assertTrue(res)
with self.assertRaises(ValueError):
    ivre.utils.range2nets((2, 1))
teststr = "TEST STRING -./*'"
self.assertEqual(ivre.utils.regexp2pattern(teststr), (re.escape(teststr), 0))
self.assertEqual(ivre.utils.regexp2pattern(re.compile('^' + re.escape(teststr) + '$')),
                 (re.escape(teststr), 0))
self.assertEqual(ivre.utils.regexp2pattern(re.compile(re.escape(teststr))),
                 ('.*' + re.escape(teststr) + '.*', 0))
self.assertEqual(ivre.utils.str2list(teststr), teststr)
teststr = '1,2|3'
self.assertItemsEqual(ivre.utils.str2list(teststr), ['1', '2', '3'])
self.assertTrue(ivre.utils.isfinal(1))
self.assertTrue(ivre.utils.isfinal('1'))
self.assertFalse(ivre.utils.isfinal([]))
self.assertFalse(ivre.utils.isfinal({}))
ports = [1, 3, 2, 4, 6, 80, 5, 5, 110, 111]
self.assertEqual(set(ports), ivre.utils.nmapspec2ports(ivre.utils.ports2nmapspec(ports)))
self.assertEqual(ivre.utils.ports2nmapspec(ports), '1-6,80,110-111')
ivre.config.NMAP_SHARE_PATH = './share/nmap/'
ivre.utils.makedirs(ivre.config.NMAP_SHARE_PATH)
with open(os.path.join(ivre.config.NMAP_SHARE_PATH, 'nmap-service-probes'), 'w') as fdesc:
    fdesc.write('Probe TCP NULL q||\nmatch test m|^test$|\nsoftmatch softtest m|^softtest$|\n')
self.assertTrue(any(not fp[1]['soft'] for fp in ivre.utils.get_nmap_svc_fp()['fp']))
self.assertTrue(any(fp[1]['soft'] for fp in ivre.utils.get_nmap_svc_fp()['fp']))
ivre.utils.cleandir(ivre.config.NMAP_SHARE_PATH)
def is_prime(n):
    if n == 2 or n == 3:
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        return True
    if n % 3 == 0:
        return False
    r = int(n ** 0.5)
    f = 5
    while f <= r:
        if n % f == 0:
            return False
        if n % (f + 2) == 0:
            return False
        f += 6
    return True
for _ in range(3):
    nbr = random.randint(2, 1000)
    factors = list(ivre.mathutils.factors(nbr))
    self.assertTrue(is_prime(nbr) or len(factors) > 1)
    self.assertTrue(all(is_prime(x) for x in factors))
    self.assertEqual(reduce(lambda x, y: x * y, factors), nbr)
'Run scans, with and without agents'
def test_scans(self):
res, out, _ = RUN(['ivre', 'runscans', '--output', 'Test', '--test', '2'])
self.assertEqual(res, 0)
self.assertTrue('\nRead address 127.0.0.1\n' in out)
self.assertTrue('\nRead address 127.0.0.2\n' in out)
res = RUN(['ivre', 'runscans', '--network', '127.0.0.1/31'])[0]
self.assertEqual(res, 0)
fdesc = tempfile.NamedTemporaryFile(delete=False)
fdesc.writelines(('127.0.0.%d\n' % i).encode() for i in range(2, 4))
fdesc.close()
res = RUN(['ivre', 'runscans', '--file', fdesc.name, '--output', 'XMLFork'])[0]
self.assertEqual(res, 0)
os.unlink(fdesc.name)
res = RUN(['ivre', 'runscans', '--range', '127.0.0.4', '127.0.0.5', '--output', 'XMLFull'])[0]
count = sum(len(walk_elt[2]) for walk_elt in os.walk('scans'))
self.assertEqual(count, 9)
res = RUN(['ivre', 'runscans', '--output', 'CommandLine'])[0]
self.assertEqual(res, 0)
res, out, _ = RUN(['ivre', 'runscans', '--output', 'Agent'])
self.assertEqual(res, 0)
with open('ivre-agent.sh', 'wb') as fdesc:
    fdesc.write(out)
os.chmod('ivre-agent.sh', 0o755)
ivre.utils.makedirs('input')
ivre.utils.makedirs('tmp')
pid_agent = subprocess.Popen([os.path.join(os.getcwd(), 'ivre-agent.sh')],
                             preexec_fn=os.setsid, cwd='tmp').pid
res = RUN(['ivre', 'runscansagent', '--test', '2', '--feed', './tmp/'],
          stdin=open(os.devnull, 'wb'))[0]
self.assertEqual(res, 0)
feed_cmd = ['runscansagent', '--sync', './tmp/']
if USE_COVERAGE:
    feed_cmd = COVERAGE + ['run', '--parallel-mode', which('ivre')] + feed_cmd
else:
    feed_cmd = ['ivre'] + feed_cmd
pid_feed = subprocess.Popen(feed_cmd).pid
while any(walk[2]
          for dirname in ['agentsdata/._tmp_/input', 'agentsdata/._tmp_/remoteinput',
                          'agentsdata/._tmp_/remotecur', 'tmp/input', 'tmp/cur', 'tmp/output']
          for walk in os.walk(dirname)):
    print(u'Waiting for runscans sync & agent')
    time.sleep(2)
os.kill(pid_agent, signal.SIGTERM)
os.kill(pid_feed, signal.SIGTERM)
os.waitpid(pid_agent, 0)
os.waitpid(pid_feed, 0)
count = sum(len(walk_elt[2]) for walk_elt in os.walk('agentsdata/._tmp_/remoteoutput/'))
self.assertEqual(count, 2)
pid_agent = subprocess.Popen([os.path.join(os.getcwd(), 'ivre-agent.sh')],
                             preexec_fn=os.setsid, cwd='tmp').pid
self.init_nmap_db()
res = RUN(['ivre', 'runscansagentdb', '--init'], stdin=open(os.devnull))[0]
self.assertEqual(res, 0)
res = RUN(['ivre', 'runscansagentdb', '--add-local-master'])[0]
self.assertEqual(res, 0)
res = RUN(['ivre', 'runscansagentdb', '--source', 'TEST-AGENT-SOURCE',
           '--add-agent', os.path.join(os.getcwd(), 'tmp')])[0]
res = RUN(['ivre', 'runscansagentdb', '--test', '2', '--assign-free-agents'])[0]
self.assertEqual(res, 0)
fdesc = tempfile.NamedTemporaryFile(delete=False)
fdesc.writelines(('127.0.0.%d\n' % i).encode() for i in range(3, 5))
fdesc.close()
res = RUN(['ivre', 'runscansagentdb', '--file', fdesc.name, '--assign-free-agents'])[0]
self.assertEqual(res, 0)
daemon_cmd = ['runscansagentdb', '--daemon']
if USE_COVERAGE:
    daemon_cmd = COVERAGE + ['run', '--parallel-mode', which('ivre')] + daemon_cmd
else:
    daemon_cmd = ['ivre'] + daemon_cmd
pid_daemon = subprocess.Popen(daemon_cmd).pid
time.sleep(4)
scanmatch = re.compile('scan:\n - id: (?P<id>[0-9a-f]+)\n.*\n.*\n'
                       ' - targets added: (?P<nbadded>\\d+)\n'
                       ' - results fetched: (?P<nbfetched>\\d+)\n'
                       ' - total targets to add: (?P<nbtargets>\\d+)\n')
is_scan_over = lambda scan: int(scan['nbtargets']) == int(scan['nbfetched'])
while True:
    res, out, _ = RUN(['ivre', 'runscansagentdb', '--list-scans'])
    scans = [scan.groupdict() for scan in scanmatch.finditer(out)]
    self.assertEqual(len(scans), 2)
    if any(is_scan_over(scan) for scan in scans):
        break
    time.sleep(2)
scan = next(scan for scan in scans if not is_scan_over(scan))
agentmatch = re.compile('agent:\n - id: (?P<id>[0-9a-f]+)\n')
res, out, _ = RUN(['ivre', 'runscansagentdb', '--list-agents'])
agents = [agent.groupdict() for agent in agentmatch.finditer(out)]
self.assertEqual(len(agents), 1)
agent = agents[0]
res = RUN(['ivre', 'runscansagentdb', '--assign',
           '%s:%s' % (agent['id'].decode(), scan['id'].decode())])[0]
time.sleep(4)
while True:
    res, out, _ = RUN(['ivre', 'runscansagentdb', '--list-scans'])
    scans = [scan.groupdict() for scan in scanmatch.finditer(out)]
    self.assertEqual(len(scans), 2)
    if all(is_scan_over(scan) for scan in scans):
        break
    time.sleep(2)
while any(walk[2]
          for dirname in ['tmp/input', 'tmp/cur', 'tmp/output', ivre.config.AGENT_MASTER_PATH]
          for walk in os.walk(dirname)
          if not (walk[0].startswith(os.path.join(ivre.config.AGENT_MASTER_PATH, 'output', ''))
                  or (walk[0] == ivre.config.AGENT_MASTER_PATH and walk[2] == ['whoami']))):
    print(u'Waiting for runscans daemon & agent')
    time.sleep(2)
os.kill(pid_agent, signal.SIGTERM)
os.kill(pid_daemon, signal.SIGTERM)
os.waitpid(pid_agent, 0)
os.waitpid(pid_daemon, 0)
res, out, _ = RUN(['ivre', 'scancli', '--count'])
self.assertEqual(res, 0)
self.assertEqual(int(out), 4)
self.assertEqual(RUN(['ivre', 'scancli', '--init'], stdin=open(os.devnull))[0], 0)
self.assertEqual(RUN(['ivre', 'runscansagentdb', '--init'], stdin=open(os.devnull))[0], 0)
'Returns a condition that is true iff all of the given conditions are true.'
def flt_and(self, *args):
return reduce(self._flt_and, args)
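For illustration, a hypothetical MongoDB-flavored backend could implement the pairwise combinator like this (`MongoLikeBackend` and its blunt `$and` nesting are assumptions, not part of the base class):
from functools import reduce

class MongoLikeBackend(object):
    def flt_and(self, *args):
        return reduce(self._flt_and, args)

    @staticmethod
    def _flt_and(cond1, cond2):
        # a real backend may merge filter keys more cleverly than nesting $and
        return {'$and': [cond1, cond2]}

# flt_and() folds any number of conditions pairwise:
# MongoLikeBackend().flt_and({'port': 80}, {'state': 'open'}, {'proto': 'tcp'})
# -> {'$and': [{'$and': [{'port': 80}, {'state': 'open'}]}, {'proto': 'tcp'}]}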
'Returns a condition that is true iff both `cond1` and `cond2` are true. This is typically implemented in the backend-specific subclass.'
@staticmethod
def _flt_and(cond1, cond2):
raise NotImplementedError
'Returns a condition that is true iff any of the given conditions is true.'
def flt_or(self, *args):
return reduce(self._flt_or, args)
'Returns a condition that is true iff either `cond1` or `cond2` is true. This is typically implemented in the backend-specific subclass.'
@staticmethod
def _flt_or(cond1, cond2):
raise NotImplementedError
'Filters documents based on their schema\'s version.'
@staticmethod
def searchversion(version):
raise NotImplementedError
'Filters (if `neg` == True, filters out) one particular IP network (CIDR notation).'
def searchnet(self, net, neg=False):
return self.searchrange(neg=neg, *utils.net2range(net))
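`utils.net2range` is not shown here; a plausible stand-in, assuming it simply turns CIDR notation into the first and last addresses of the network:
import ipaddress

def net2range_sketch(net):
    # '192.168.0.0/24' -> ('192.168.0.0', '192.168.0.255')
    network = ipaddress.ip_network(net, strict=False)
    return str(network.network_address), str(network.broadcast_address)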
'Filters (if `neg` == True, filters out) one particular IP range given its boundaries `start` and `stop`.'
@staticmethod
def searchrange(start, stop, neg=False):
raise NotImplementedError
'Finds phpMyAdmin instances based on their cookies.'
def searchphpmyadmin(self):
return self.searchcookie('phpMyAdmin')
'Finds specific cookie names. This is typically implemented in the backend-specific purpose-specific subclass.'
def searchcookie(self, name):
raise NotImplementedError
'Finds shared files or directories that are typical of a web application. Being able to write web files often leads to arbitrary code execution. Being able to read directly web files (without a PHP/ASP/... interpreter) often leads to privilege escalation in the application and sometimes to arbitrary code execution by finding backdoors/shells/vulnerabilities.'
def searchwebfiles(self):
return self.searchfile(fname=re.compile('vhost|www|web\\.config|\\.htaccess|\\.([aj]sp|php|html?|js|css)', re.I))
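A quick demonstration of what the file-name pattern above flags (the sample names are hypothetical):
import re

webfile_re = re.compile('vhost|www|web\\.config|\\.htaccess|\\.([aj]sp|php|html?|js|css)', re.I)
for fname in ['index.php', 'Web.Config', '.htaccess', 'site/www/logo.png']:
    assert webfile_re.search(fname) is not None  # all treated as web files
assert webfile_re.search('notes.txt') is None    # plain text is not flagged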
'Finds shared files or directories from a name or a pattern.'
def searchfile(self, fname=None, scripts=None):
raise NotImplementedError
'Finds Java User-Agent.'
def searchjavaua(self):
return self.searchuseragent(re.compile('(^| )(Java|javaws)/', flags=0))
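A quick illustration of what this User-Agent pattern matches (the sample UA strings are hypothetical):
import re

java_ua = re.compile('(^| )(Java|javaws)/', flags=0)
assert java_ua.search('Java/1.8.0_151') is not None              # plain Java UA
assert java_ua.search('Mozilla/5.0 javaws/10.60.2') is not None  # javaws token
assert java_ua.search('JavaScript-Client/2.0') is None           # 'Java' not followed by '/'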
'Finds specified User-Agent(s).'
@staticmethod
def searchuseragent(useragent):
raise NotImplementedError
'Gets a cursor, which can be iterated to get results. The type of that cursor is backend-specific, and this is typically implemented in the backend-specific subclasses'
def get(self, spec, **kargs):
raise NotImplementedError
'Gets a unique identifier for a specified `record`. The type of the identifier is backend-specific, and this is typically implemented in the backend-specific subclasses'
@staticmethod
def getid(record):
return record['_id']
'Gets a specific record given its unique identifier `oid`. Alias for .searchobjectid().'
@classmethod
def searchid(cls, oid, neg=False):
return cls.searchobjectid(oid, neg=neg)
'Filters records by their ObjectID. `oid` can be a single object ID or many (as a list or any iterable), specified as strings or as `ObjectID`s.'
@classmethod
def searchobjectid(cls, oid, neg=False):
raise NotImplementedError