import  requests
import re
from bs4 import  BeautifulSoup
from lxml import etree
import  time

# Request header so the site sees a normal desktop browser instead of a script.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}
# XPath axis vocabulary: parent, child, sibling, ancestor, descendant nodes.
# Pages 1..35 of the "text" jokes section.
urls = [f'http://www.qiushibaike.com/text/page/{page}' for page in range(1, 36)]

# Minimal lxml demo: pull every piece of text out of a nested element.
demo_html = '''
<div class="red">需要的内容1
  <h1>需要的内容2</h1>
</div>
'''

demo_tree = etree.HTML(demo_html)
red_div = demo_tree.xpath('//div[@class="red"]')[0]
# string(.) concatenates the node's own text with that of all its
# descendants — the way to get text out of nested tags.
nested_text = red_div.xpath('string(.)')
print(nested_text)

print('---------------------------------------')
def re_scraper(url):
    """Fetch *url* and extract the first (id, content) pair with regexes.

    Returns a dict {'id': ..., 'content': ...} for the first matched post,
    or None (implicitly) when the page yields no matches.
    """
    # BUG FIX: the original `requests.get(url, header)` passed the header
    # dict positionally, which requests binds to `params` (query string) —
    # the User-Agent header was never actually sent.
    res = requests.get(url, headers=header)
    ids = re.findall(r'<h2>(.*?)</h2>', res.text, re.S)
    contents = re.findall(r'<div class="content">.*?<span>(.*?)</span>', res.text, re.S)
    # NOTE(review): returning inside the loop yields only the first pair;
    # preserved deliberately — callers only time the request/parse.
    for post_id, content in zip(ids, contents):
        return {
            'id': post_id,
            'content': content
        }
def bs_scraper(url):
    """Fetch *url* and extract the first (id, content) pair with BeautifulSoup.

    Returns a dict {'id': ..., 'content': ...} holding the first matched
    Tag objects, or None (implicitly) when nothing matches.
    """
    # BUG FIX: headers must be passed by keyword; positionally the dict
    # becomes `params` and the User-Agent is never sent.
    res = requests.get(url, headers=header)
    soup = BeautifulSoup(res.text, 'lxml')
    ids = soup.select('a > h2')
    contents = soup.select('div > span')
    # NOTE(review): returns only the first pair, matching re_scraper.
    for post_id, content in zip(ids, contents):
        return {
            'id': post_id,
            'content': content
        }

def lxml_scraper(url):
    """Fetch *url* and extract the first (id, content) pair with lxml XPath.

    Returns a dict {'id': ..., 'content': ...} for the first article div,
    or None when the page has no matches or the expected sub-nodes are
    missing (the error is printed, best-effort).
    """
    # BUG FIX: headers must be passed by keyword; positionally the dict
    # becomes `params` and the User-Agent is never sent.
    res = requests.get(url, headers=header)
    selector = etree.HTML(res.text)
    # Match article divs whose class attribute starts with this prefix
    # (the trailing class token varies, e.g. "typs_hot").
    infos = selector.xpath('//div[starts-with(@class,"article block untagged mb15")]')
    try:
        for info in infos:
            # [0] raises IndexError when a post lacks the expected node.
            post_id = info.xpath('div[1]/a[2]/h2/text()')[0]
            contents = info.xpath('a[1]/div/span/text()')[0]
            return {
                'id': post_id,
                'content': contents
            }
    except IndexError as e:
        # Narrowed from a blanket `except Exception`: only the [0] lookups
        # above are expected to fail. Keep the original best-effort
        # behavior of logging and returning None.
        print(e)

# Time each scraper over all 35 pages and print its name with the elapsed
# seconds (the timestamp is refreshed after every request, so the printed
# figure is the total wall-clock time for the whole page list).
scrapers = [
    ('RegularExpressions', re_scraper),
    ('BeautifulSoup', bs_scraper),
    ('Lxml', lxml_scraper),
]
for name, scrape in scrapers:
    start = time.time()
    for url in urls:
        scrape(url)
        end = time.time()
    print(name, end - start)

# One extra sample run of the lxml variant against page 1.
lxml_scraper('https://www.qiushibaike.com/text/page/1/')
#print(res.text)
# ````````````````````````selector=etree.HTML(res.text)
# ids=selector.xpath('//div[@class="article block untagged mb15 typs_hot"]')
# print(ids)
# for id in ids:
#    print(id)
#    info=id.xpath('div[1]/a[2]/h2/text')
#    print(info)

#//*[@id="qiushi_tag_119879413"]/div[1]/a[2]/h2
#//*[@id="qiushi_tag_119879413"]/div[1]/a[2]/h2