#爬虫闯关
# 第一关
#源地址： http://www.heibanke.com/lesson/crawler_ex00/   获取标记数字拼接组成新的地址并访问获取标记数字重复

#获取 h3 标记中的数据并提取数字

#BeautifulSoup实现
# NOTE: disabled alternative implementation using requests + BeautifulSoup.
# It solves the same exercise (follow the number found in each page's <h3>
# into the next URL until no number appears). Kept for reference; the active
# Selenium implementation below replaces it. The string content is left
# byte-for-byte untouched.
'''
#导入相关的库
import requests,bs4,re
#设置初始地址
url = "http://www.heibanke.com/lesson/crawler_ex00/"

#循环获取直至结束
while True:
    print("打开页面: %s" % url)
    response = requests.get(url)
    print("请求状态码: %s" % str(response.status_code))

    soup = bs4.BeautifulSoup(response.text,"html.parser")

    #根据标识符来获取数据
    comic = soup.select('h3')
    print("获取到的H3中的数据: %s" % comic[0].getText())
    number = re.findall("\d+",comic[0].getText())

    #如果数字为空则结束
    if number == []:
        print("结束: %s" % url)
        break
    else:
        url = "http://www.heibanke.com/lesson/crawler_ex00/" + number[0]
'''

#selenium 实现
#导入相关的库
import re

import requests
from selenium import webdriver
from selenium.webdriver.common.by import By

# Starting page of the exercise; each page's <h3> text holds the number that
# forms the path of the next page.
url = "http://www.heibanke.com/lesson/crawler_ex00/"

browser = webdriver.Firefox()

try:
    # Follow the number chain until a page whose <h3> contains no digits.
    while True:
        print('打开页面: %s' % url)
        browser.get(url)
        # Selenium 4 removed find_element_by_tag_name(); the supported API
        # is find_element(By.TAG_NAME, ...).
        elem = browser.find_element(By.TAG_NAME, 'h3')

        print('获取到的参数数据 %s' % elem.text)
        # Raw string avoids the invalid-escape SyntaxWarning for "\d".
        number = re.findall(r"\d+", elem.text)
        if not number:
            # No digits found -> this is the final page.
            print('The end %s' % url)
            break
        url = "http://www.heibanke.com/lesson/crawler_ex00/" + number[0]
finally:
    # Always shut the browser down so the WebDriver/geckodriver process
    # does not leak if the loop raises or the script is interrupted.
    browser.quit()