from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery
import requests
import time,json

def getPage(url):
    """Fetch the page at *url* and return its body as text.

    Returns:
        The response text when the server answers HTTP 200, otherwise
        None (including on any network error or timeout).
    """
    # Present a browser User-Agent: douban.com rejects the default
    # python-requests identifier.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'}
    try:
        # timeout prevents the scraper from hanging forever on a
        # stalled connection; a Timeout is a RequestException, so it
        # falls into the same None path as other network failures.
        res = requests.get(url, headers=headers, timeout=10)
    except RequestException:
        return None
    # Treat only a 200 response as success.
    return res.text if res.status_code == 200 else None
'''
 #==================使用Xpath解析=====================
def parsePage(content):
    html = etree.HTML(content)
    items = html.xpath("//td[@valign='top']")
    for item in items:
        yield{
          'title':item.xpath("//div[@class='pl2']/a/text()")[0].strip(),
          'image':item.xpath("//a[@class='nbg']/img/@src")[0],
          'info':item.xpath("//p[@class='pl']/text()")[0],
          'score':item.xpath("//span[@class='rating_nums']/text()")[0]
        }
'''
'''
 #==================使用BeautifulSoup解析=====================
def parsePage(content):
    soup = BeautifulSoup(content,'lxml')
    items = soup.find_all(name='tr',attrs={'class':'item'})
    for item in items:
        yield{
            'title':item.select("div.pl2 a")[0].get_text().strip(),
            'image':item.find(name='img',attrs={'width':'90'}).attrs['src'],
            'info':item.select("p.pl")[0].string,
            'score':item.select("span.rating_nums")[0].string,
        }
'''

#===================使用PyQuery解析===========================
def parsePage(content):
    """Parse one Douban Top-250 page with PyQuery.

    Yields one dict per book row with keys: title, image, info, score.
    """
    doc = PyQuery(content)
    rows = doc("tr.item")
    for row in rows.items():
        title = row.find("div.pl2 a").text()
        image = row.find("a.nbg img").attr('src')
        info = row.find("p.pl").text()
        score = row.find("span.rating_nums").text()
        yield {
            'title': title,
            'image': image,
            'info': info,
            'score': score
        }


def writeFile(content, path="./result.txt"):
    """Append *content* as one JSON line to *path*.

    Args:
        content: any JSON-serializable object (one scraped book record).
        path: target file; defaults to ./result.txt so existing callers
            keep their original behavior.
    """
    # ensure_ascii=False keeps Chinese titles human-readable in the file.
    with open(path, 'a', encoding="utf-8") as f:
        f.write(json.dumps(content, ensure_ascii=False) + "\n")

def main(offset):
    """Scrape one Top-250 listing page at *offset* and persist every
    parsed record to the result file."""
    page_url = f'https://book.douban.com/top250?start={offset}'
    html = getPage(page_url)
    if not html:
        return
    for record in parsePage(html):
        print(record)
        writeFile(record)

if __name__ == '__main__':
    # Crawl all 10 listing pages (25 books each), pausing between
    # requests to stay polite to the server.
    for page in range(10):
        main(offset=page * 25)
        time.sleep(1)