from urllib.error import URLError, HTTPError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re

# common parameters
proteinNumberUrl = "http://www.ncbi.nlm.nih.gov/nuccore/"  # base URL; the record id is appended to it
interpreter = ["html.parser","lxml","xml","html5lib"]  # candidate BeautifulSoup parser backends
errorTimes = 0    # count of consecutive pages missing the expected header element
tolerateErrorTimes = 300    # defines the maximal tolerated number of consecutive errors before stopping
proteinNumber = 2675  # first record id to crawl

# crawling: walk sequential nuccore record ids, printing each record's title.
# Stops once `tolerateErrorTimes` consecutive pages lack the expected header
# element (treated as the end of the usable id range).
while True:
    if errorTimes >= tolerateErrorTimes:
        break
    # Fetch the page inside the try block only (the original issued an extra,
    # unprotected urlopen call before it, which could crash and double-fetched
    # every page). HTTPError is a subclass of URLError, so it must be caught
    # FIRST — otherwise the URLError clause swallows it and that branch is
    # unreachable.
    try:
        html = urlopen(proteinNumberUrl + str(proteinNumber))
    except HTTPError as e2:
        print("(%d)url open ERROR!,error code is %d,reason is %s,header is%s" % (proteinNumber, e2.code, e2.reason, e2.headers))
        proteinNumber += 1
        continue
    except URLError as e1:
        print("(%d)url open ERROR!,reason is %s" % (proteinNumber, e1.reason))
        proteinNumber += 1
        continue
    # Parse with the html5lib backend (interpreter[3]), then release the
    # HTTP connection — the original never closed the response.
    try:
        bsObj = BeautifulSoup(html, interpreter[3])
    finally:
        html.close()
    try:
        content = bsObj.find("div", {"class": "rprtheader"}).find("h1").get_text()
        print(str(proteinNumber) + ": " + content)
        errorTimes = 0  # reset: the error streak must be consecutive
    except AttributeError:
        # Page loaded but lacks the expected header -> count toward the streak.
        print(str(proteinNumber) + ": << ERROR! >>")
        errorTimes += 1
    proteinNumber += 1

