import requests
from requests.exceptions import RequestException
import time,json
from lxml import etree

#爬取指定url页面信息
# Fetch the raw HTML of the given URL.
def getPage(url):
    """Download *url* and return its body text, or None on any failure.

    Returns None both for non-200 status codes and for transport-level
    errors (DNS, connection reset, timeout, ...), so callers only need
    a single truthiness check.
    """
    try:
        # Original call had no timeout: a silent server would hang the
        # whole crawl forever. 10s is generous for a listing page.
        res = requests.get(url, timeout=10)
        if res.status_code == 200:
            return res.text
        else:
            return None
    except RequestException as err:
        print(err)
        return None

#解析爬取网页中的内容，并返回字段结果
def parsePage(content):
    #===========Xpath===========
    html = etree.HTML(content)
    items = html.xpath("//tr[@class='item']")
    for item in items:
        #获取引言时有空值，做异常处理，确保程序正常运行
        try:
            info = item.xpath(".//span[@class='inq']/text()")[0]
            if info != "":
                quote = info
        except:
            quote = ""
        yield {
            "书名":item.xpath(".//div[@class='pl2']/a/text()")[0],
            "引言":quote,
            "信息":item.xpath(".//p[@class='pl']/text()")[0],
            "评分":item.xpath(".//span[@class='rating_nums']/text()")[0],
            "图书封面":item.xpath(".//img[@width='90']/@src")[0],
        }

#执行文件追加写操作
def writeFile(content):
    with open("./result_xpath.txt","a",encoding="utf-8") as f:
        f.write(json.dumps(content,ensure_ascii=False)+"\n")

#主程序函数，负责调度执行爬虫处理
def main(offset):
    url = 'https://book.douban.com/top250?start='+str(offset)
    html = getPage(url)
    if html:
        for item in parsePage(html):
            #print(item)
            writeFile(item)

#判断当前执行是否为主程序，并遍历调用主函数爬取数据
if __name__ == '__main__':
    #main(0)
    for i in range(10):
        main(offset=i*25)
        time.sleep(1)