{"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler.__init__","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Initializes the Crawl class","docstring_summary":"Initializes the Crawl class","docstring_tokens":["Initializes","the","Crawl","class"],"function":"def __init__(self):\n \"\"\"\n Initializes the Crawl class\n \"\"\"\n self._config = {}\n self._links = []\n self._start_time = None","function_tokens":["def","__init__","(","self",")",":","self",".","_config","=","{","}","self",".","_links","=","[","]","self",".","_start_time","=","None"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L26-L32"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._request","parameters":"(self, url)","argument_list":"","return_statement":"return response","docstring":"Sends a POST\/GET requests using a random user agent\n :param url: the url to visit\n :return: the response Requests object","docstring_summary":"Sends a POST\/GET requests using a random user agent\n :param url: the url to visit\n :return: the response Requests object","docstring_tokens":["Sends","a","POST","\/","GET","requests","using","a","random","user","agent",":","param","url",":","the","url","to","visit",":","return",":","the","response","Requests","object"],"function":"def _request(self, url):\n \"\"\"\n Sends a POST\/GET requests using a random user agent\n :param url: the url to visit\n :return: the response Requests object\n \"\"\"\n random_user_agent = random.choice(self._config[\"user_agents\"])\n headers = {'user-agent': random_user_agent}\n\n response = requests.get(url, headers=headers, timeout=5)\n\n return response","function_tokens":["def","_request","(","self",",","url",")",":","random_user_agent","=","random",".","choice","(","self",".","_config","[","\"user_agents\"","]",")","headers","=","{","'user-agent'",":","random_user_agent","}","response","=","requests",".","get","(","url",",","headers","=","headers",",","timeout","=","5",")","return","response"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L40-L51"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._normalize_link","parameters":"(link, root_url)","argument_list":"","return_statement":"return link","docstring":"Normalizes links extracted from the DOM by making them all absolute, so\n we can request them, for example, turns a \"\/images\" link extracted from https:\/\/imgur.com\n to \"https:\/\/imgur.com\/images\"\n :param link: link found in the DOM\n :param root_url: the URL the DOM was loaded from\n :return: absolute link","docstring_summary":"Normalizes links extracted from the DOM by making them all absolute, so\n we can request them, for example, turns a \"\/images\" link extracted from https:\/\/imgur.com\n to \"https:\/\/imgur.com\/images\"\n :param link: link found in the DOM\n :param root_url: the URL the DOM was loaded from\n :return: absolute 
link","docstring_tokens":["Normalizes","links","extracted","from","the","DOM","by","making","them","all","absolute","so","we","can","request","them","for","example","turns","a","\/","images","link","extracted","from","https",":","\/\/","imgur",".","com","to","https",":","\/\/","imgur",".","com","\/","images",":","param","link",":","link","found","in","the","DOM",":","param","root_url",":","the","URL","the","DOM","was","loaded","from",":","return",":","absolute","link"],"function":"def _normalize_link(link, root_url):\n \"\"\"\n Normalizes links extracted from the DOM by making them all absolute, so\n we can request them, for example, turns a \"\/images\" link extracted from https:\/\/imgur.com\n to \"https:\/\/imgur.com\/images\"\n :param link: link found in the DOM\n :param root_url: the URL the DOM was loaded from\n :return: absolute link\n \"\"\"\n try:\n parsed_url = urlparse(link)\n except ValueError:\n # urlparse can get confused about urls with the ']'\n # character and thinks it must be a malformed IPv6 URL\n return None\n parsed_root_url = urlparse(root_url)\n\n # '\/\/' means keep the current protocol used to access this URL\n if link.startswith(\"\/\/\"):\n return \"{}:\/\/{}{}\".format(parsed_root_url.scheme, parsed_url.netloc, parsed_url.path)\n\n # possibly a relative path\n if not parsed_url.scheme:\n return urljoin(root_url, link)\n\n return link","function_tokens":["def","_normalize_link","(","link",",","root_url",")",":","try",":","parsed_url","=","urlparse","(","link",")","except","ValueError",":","# urlparse can get confused about urls with the ']'","# character and thinks it must be a malformed IPv6 URL","return","None","parsed_root_url","=","urlparse","(","root_url",")","# '\/\/' means keep the current protocol used to access this URL","if","link",".","startswith","(","\"\/\/\"",")",":","return","\"{}:\/\/{}{}\"",".","format","(","parsed_root_url",".","scheme",",","parsed_url",".","netloc",",","parsed_url",".","path",")","# possibly a relative path","if","not","parsed_url",".","scheme",":","return","urljoin","(","root_url",",","link",")","return","link"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L54-L79"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._is_valid_url","parameters":"(url)","argument_list":"","return_statement":"return re.match(regex, url) is not None","docstring":"Check if a url is a valid url.\n Used to filter out invalid values that were found in the \"href\" attribute,\n for example \"javascript:void(0)\"\n taken from https:\/\/stackoverflow.com\/questions\/7160737\n :param url: url to be checked\n :return: boolean indicating whether the URL is valid or not","docstring_summary":"Check if a url is a valid url.\n Used to filter out invalid values that were found in the \"href\" attribute,\n for example \"javascript:void(0)\"\n taken from https:\/\/stackoverflow.com\/questions\/7160737\n :param url: url to be checked\n :return: boolean indicating whether the URL is valid or 
not","docstring_tokens":["Check","if","a","url","is","a","valid","url",".","Used","to","filter","out","invalid","values","that","were","found","in","the","href","attribute","for","example","javascript",":","void","(","0",")","taken","from","https",":","\/\/","stackoverflow",".","com","\/","questions","\/","7160737",":","param","url",":","url","to","be","checked",":","return",":","boolean","indicating","whether","the","URL","is","valid","or","not"],"function":"def _is_valid_url(url):\n \"\"\"\n Check if a url is a valid url.\n Used to filter out invalid values that were found in the \"href\" attribute,\n for example \"javascript:void(0)\"\n taken from https:\/\/stackoverflow.com\/questions\/7160737\n :param url: url to be checked\n :return: boolean indicating whether the URL is valid or not\n \"\"\"\n regex = re.compile(\n r'^(?:http|ftp)s?:\/\/' # http:\/\/ or https:\/\/\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:\/?|[\/?]\\S+)$', re.IGNORECASE)\n return re.match(regex, url) is not None","function_tokens":["def","_is_valid_url","(","url",")",":","regex","=","re",".","compile","(","r'^(?:http|ftp)s?:\/\/'","# http:\/\/ or https:\/\/","r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'","# domain...","r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'","# ...or ip","r'(?::\\d+)?'","# optional port","r'(?:\/?|[\/?]\\S+)$'",",","re",".","IGNORECASE",")","return","re",".","match","(","regex",",","url",")","is","not","None"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L82-L97"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._is_blacklisted","parameters":"(self, url)","argument_list":"","return_statement":"return any(blacklisted_url in url for blacklisted_url in self._config[\"blacklisted_urls\"])","docstring":"Checks is a URL is blacklisted\n :param url: full URL\n :return: boolean indicating whether a URL is blacklisted or not","docstring_summary":"Checks is a URL is blacklisted\n :param url: full URL\n :return: boolean indicating whether a URL is blacklisted or not","docstring_tokens":["Checks","is","a","URL","is","blacklisted",":","param","url",":","full","URL",":","return",":","boolean","indicating","whether","a","URL","is","blacklisted","or","not"],"function":"def _is_blacklisted(self, url):\n \"\"\"\n Checks is a URL is blacklisted\n :param url: full URL\n :return: boolean indicating whether a URL is blacklisted or not\n \"\"\"\n return any(blacklisted_url in url for blacklisted_url in self._config[\"blacklisted_urls\"])","function_tokens":["def","_is_blacklisted","(","self",",","url",")",":","return","any","(","blacklisted_url","in","url","for","blacklisted_url","in","self",".","_config","[","\"blacklisted_urls\"","]",")"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L99-L105"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._should_accept_url","parameters":"(self, url)","argument_list":"","return_statement":"return url and self._is_valid_url(url) and not self._is_blacklisted(url)","docstring":"filters url if it is blacklisted or not valid, we put filtering logic here\n :param url: full url to be checked\n :return: boolean of 

# Crawler._should_accept_url(self, url) — noisy.py L107-L113
def _should_accept_url(self, url):
    """
    Filters out a URL if it is blacklisted or not valid; the filtering logic lives here
    :param url: full URL to be checked
    :return: boolean of whether the URL should be accepted and potentially visited
    """
    return url and self._is_valid_url(url) and not self._is_blacklisted(url)
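
# The accept check combines validity with a plain substring blacklist, so a
# single blacklist entry can veto a whole family of URLs. A standalone
# illustration with example data (not the project's real blacklist):
def _demo_blacklist():
    blacklisted_urls = ["facebook.com", "doubleclick.net"]

    def is_blacklisted(url):
        return any(entry in url for entry in blacklisted_urls)

    assert is_blacklisted("https://www.facebook.com/somepage") is True
    assert is_blacklisted("https://example.com/about") is False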
page","urls","=","re",".","findall","(","pattern",",","str","(","body",")",")","normalize_urls","=","[","self",".","_normalize_link","(","url",",","root_url",")","for","url","in","urls","]","filtered_urls","=","list","(","filter","(","self",".","_should_accept_url",",","normalize_urls",")",")","return","filtered_urls"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L115-L129"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._remove_and_blacklist","parameters":"(self, link)","argument_list":"","return_statement":"","docstring":"Removes a link from our current links list\n and blacklists it so we don't visit it in the future\n :param link: link to remove and blacklist","docstring_summary":"Removes a link from our current links list\n and blacklists it so we don't visit it in the future\n :param link: link to remove and blacklist","docstring_tokens":["Removes","a","link","from","our","current","links","list","and","blacklists","it","so","we","don","t","visit","it","in","the","future",":","param","link",":","link","to","remove","and","blacklist"],"function":"def _remove_and_blacklist(self, link):\n \"\"\"\n Removes a link from our current links list\n and blacklists it so we don't visit it in the future\n :param link: link to remove and blacklist\n \"\"\"\n self._config['blacklisted_urls'].append(link)\n del self._links[self._links.index(link)]","function_tokens":["def","_remove_and_blacklist","(","self",",","link",")",":","self",".","_config","[","'blacklisted_urls'","]",".","append","(","link",")","del","self",".","_links","[","self",".","_links",".","index","(","link",")","]"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L131-L138"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._browse_from_links","parameters":"(self, depth=0)","argument_list":"","return_statement":"","docstring":"Selects a random link out of the available link list and visits it.\n Blacklists any link that is not responsive or that contains no other links.\n Please note that this function is recursive and will keep calling itself until\n a dead end has reached or when we ran out of links\n :param depth: our current link depth","docstring_summary":"Selects a random link out of the available link list and visits it.\n Blacklists any link that is not responsive or that contains no other links.\n Please note that this function is recursive and will keep calling itself until\n a dead end has reached or when we ran out of links\n :param depth: our current link depth","docstring_tokens":["Selects","a","random","link","out","of","the","available","link","list","and","visits","it",".","Blacklists","any","link","that","is","not","responsive","or","that","contains","no","other","links",".","Please","note","that","this","function","is","recursive","and","will","keep","calling","itself","until","a","dead","end","has","reached","or","when","we","ran","out","of","links",":","param","depth",":","our","current","link","depth"],"function":"def _browse_from_links(self, depth=0):\n \"\"\"\n Selects a random link out of the available link list and visits it.\n Blacklists any link that is not responsive or that contains no other links.\n Please note that this function is recursive and will keep calling itself until\n a dead end has reached or when we ran out of links\n 

# Crawler._browse_from_links(self, depth=0) — noisy.py L140-L179
def _browse_from_links(self, depth=0):
    """
    Selects a random link out of the available link list and visits it.
    Blacklists any link that is not responsive or that contains no other links.
    Please note that this function is recursive and will keep calling itself until
    a dead end is reached or we run out of links
    :param depth: our current link depth
    """
    is_depth_reached = depth >= self._config['max_depth']
    if not len(self._links) or is_depth_reached:
        logging.debug("Hit a dead end, moving to the next root URL")
        # escape from the recursion; we don't have links to continue, or we have reached the max depth
        return

    if self._is_timeout_reached():
        raise self.CrawlerTimedOut

    random_link = random.choice(self._links)
    try:
        logging.info("Visiting {}".format(random_link))
        sub_page = self._request(random_link).content
        sub_links = self._extract_urls(sub_page, random_link)

        # sleep for a random amount of time
        time.sleep(random.randrange(self._config["min_sleep"], self._config["max_sleep"]))

        # make sure we have more than 1 link to pick from
        if len(sub_links) > 1:
            # continue from the links extracted from the new page
            self._links = sub_links
        else:
            # dead end: remove the link from our list and blacklist it,
            # then retry with the current link list
            self._remove_and_blacklist(random_link)

    except requests.exceptions.RequestException:
        logging.debug("Exception on URL: %s, removing from list and trying again!" % random_link)
        self._remove_and_blacklist(random_link)

    self._browse_from_links(depth + 1)
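
# The recursion above is a bounded random walk. The same control flow on an
# in-memory link graph (hypothetical example data), without network calls
# or sleeps:
def _demo_random_walk():
    GRAPH = {  # page -> outgoing links
        "a": ["b", "c"],
        "b": ["a", "c"],
        "c": [],
    }

    def walk(links, depth=0, max_depth=5):
        if not links or depth >= max_depth:
            return  # dead end or depth limit: same exit condition as above
        page = random.choice(links)
        sub_links = GRAPH.get(page, [])
        if len(sub_links) > 1:
            links = sub_links  # continue from the new page's links
        else:
            links = [l for l in links if l != page]  # drop the dead end
        walk(links, depth + 1, max_depth)

    walk(["a", "b"])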

# Crawler.load_config_file(self, file_path) — noisy.py L181-L190
def load_config_file(self, file_path):
    """
    Loads and decodes a JSON config file, and sets the config of the
    crawler instance to the loaded one
    :param file_path: path of the config file
    :return:
    """
    with open(file_path, 'r') as config_file:
        config = json.load(config_file)
        self.set_config(config)


# Crawler.set_config(self, config) — noisy.py L192-L203
def set_config(self, config):
    """
    Sets the config of the crawler instance to the provided dict
    :param config: dict of configuration options, for example:
        {
            "root_urls": [],
            "blacklisted_urls": [],
            "click_depth": 5
            ...
        }
    """
    self._config = config
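
# A minimal config covering every key the methods in this file read; the
# values are illustrative, not the project's shipped defaults.
def _demo_write_config():
    example_config = {
        "max_depth": 25,          # used by _browse_from_links
        "min_sleep": 3,           # lower bound for time.sleep, in seconds
        "max_sleep": 6,           # upper bound for time.sleep, in seconds
        "timeout": False,         # False disables the crawl timeout
        "root_urls": ["https://example.com"],
        "blacklisted_urls": ["facebook.com"],
        "user_agents": ["Mozilla/5.0 (Windows NT 10.0; Win64; x64)"],
    }

    with open("config.json", "w") as f:
        json.dump(example_config, f, indent=2)
    # crawler.load_config_file("config.json") would now pick this up.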
value","function_tokens":["def","set_option","(","self",",","option",",","value",")",":","self",".","_config","[","option","]","=","value"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L205-L211"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler._is_timeout_reached","parameters":"(self)","argument_list":"","return_statement":"return is_timeout_set and is_timed_out","docstring":"Determines whether the specified timeout has reached, if no timeout\n is specified then return false\n :return: boolean indicating whether the timeout has reached","docstring_summary":"Determines whether the specified timeout has reached, if no timeout\n is specified then return false\n :return: boolean indicating whether the timeout has reached","docstring_tokens":["Determines","whether","the","specified","timeout","has","reached","if","no","timeout","is","specified","then","return","false",":","return",":","boolean","indicating","whether","the","timeout","has","reached"],"function":"def _is_timeout_reached(self):\n \"\"\"\n Determines whether the specified timeout has reached, if no timeout\n is specified then return false\n :return: boolean indicating whether the timeout has reached\n \"\"\"\n is_timeout_set = self._config[\"timeout\"] is not False # False is set when no timeout is desired\n end_time = self._start_time + datetime.timedelta(seconds=self._config[\"timeout\"])\n is_timed_out = datetime.datetime.now() >= end_time\n\n return is_timeout_set and is_timed_out","function_tokens":["def","_is_timeout_reached","(","self",")",":","is_timeout_set","=","self",".","_config","[","\"timeout\"","]","is","not","False","# False is set when no timeout is desired","end_time","=","self",".","_start_time","+","datetime",".","timedelta","(","seconds","=","self",".","_config","[","\"timeout\"","]",")","is_timed_out","=","datetime",".","datetime",".","now","(",")",">=","end_time","return","is_timeout_set","and","is_timed_out"],"url":"https:\/\/github.com\/1tayH\/noisy\/blob\/c21e7682d5626d96d0f6ee56a5ed749078d34aac\/noisy.py#L213-L223"} {"nwo":"1tayH\/noisy","sha":"c21e7682d5626d96d0f6ee56a5ed749078d34aac","path":"noisy.py","language":"python","identifier":"Crawler.crawl","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Collects links from our root urls, stores them and then calls\n `_browse_from_links` to browse them","docstring_summary":"Collects links from our root urls, stores them and then calls\n `_browse_from_links` to browse them","docstring_tokens":["Collects","links","from","our","root","urls","stores","them","and","then","calls","_browse_from_links","to","browse","them"],"function":"def crawl(self):\n \"\"\"\n Collects links from our root urls, stores them and then calls\n `_browse_from_links` to browse them\n \"\"\"\n self._start_time = datetime.datetime.now()\n\n while True:\n url = random.choice(self._config[\"root_urls\"])\n try:\n body = self._request(url).content\n self._links = self._extract_urls(body, url)\n logging.debug(\"found {} links\".format(len(self._links)))\n self._browse_from_links()\n\n except requests.exceptions.RequestException:\n logging.warn(\"Error connecting to root url: {}\".format(url))\n \n except MemoryError:\n logging.warn(\"Error: content at url: {} is exhausting the memory\".format(url))\n\n except LocationParseError:\n logging.warn(\"Error encountered during parsing of: {}\".format(url))\n\n except 

# Crawler.crawl(self) — noisy.py L225-L251
def crawl(self):
    """
    Collects links from our root URLs, stores them, and then calls
    `_browse_from_links` to browse them
    """
    self._start_time = datetime.datetime.now()

    while True:
        url = random.choice(self._config["root_urls"])
        try:
            body = self._request(url).content
            self._links = self._extract_urls(body, url)
            logging.debug("found {} links".format(len(self._links)))
            self._browse_from_links()

        except requests.exceptions.RequestException:
            logging.warning("Error connecting to root url: {}".format(url))

        except MemoryError:
            logging.warning("Error: content at url: {} is exhausting the memory".format(url))

        except LocationParseError:
            logging.warning("Error encountered during parsing of: {}".format(url))

        except self.CrawlerTimedOut:
            logging.info("Timeout has been exceeded, exiting")
            return
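
# End-to-end usage sketch. In the source module the functions above are
# methods of a `Crawler` class; assembling them back here (a hypothetical
# reconstruction, since this file lists them individually) gives the
# programmatic entry point:
if __name__ == "__main__":
    Crawler = type("Crawler", (), {
        "__init__": __init__, "crawl": crawl, "_request": _request,
        "_normalize_link": staticmethod(_normalize_link),
        "_is_valid_url": staticmethod(_is_valid_url),
        "_is_blacklisted": _is_blacklisted,
        "_should_accept_url": _should_accept_url,
        "_extract_urls": _extract_urls,
        "_remove_and_blacklist": _remove_and_blacklist,
        "_browse_from_links": _browse_from_links,
        "load_config_file": load_config_file,
        "set_config": set_config, "set_option": set_option,
        "_is_timeout_reached": _is_timeout_reached,
        "CrawlerTimedOut": CrawlerTimedOut,
    })

    crawler = Crawler()
    crawler.load_config_file("config.json")  # hypothetical config path
    crawler.set_option("timeout", 3600)      # stop crawling after an hour
    crawler.crawl()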