# 用xpath 解析库对豆瓣top250图书进行爬取
# 用requests对数据进行爬取，用xpath对数据进行解析
from requests.exceptions import RequestException
import requests
from lxml import etree
import os,time,json

def getPage(url):
    '''Fetch the HTML of *url* and return it as text, or None on any failure.

    Returns None when the request raises a RequestException or the server
    answers with a non-200 status code.
    '''
    # BUGFIX: the original header value started with a duplicated
    # "User-Agent:" prefix, producing a malformed UA string.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
    }
    try:
        # timeout keeps the crawler from hanging forever on a stalled connection
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text  # page source as decoded text
        return None
    except RequestException:
        return None

def parsePage(content):
    '''Parse one Douban top-250 HTML page and yield a dict per book.

    Each dict has keys: image, title, author, grade, content.
    Any field whose xpath finds no node yields None instead of raising
    IndexError (the original crashed on missing image/author nodes and
    inconsistently returned one-element lists for title/grade/content).
    '''
    def _first(nodes):
        # xpath() returns a list; take the first match or None when absent.
        return nodes[0] if nodes else None

    html = etree.HTML(content)  # parse the document, returns the root element
    for item in html.xpath("//tr[@class='item']"):  # one <tr class="item"> per book
        author_text = _first(item.xpath('.//td/p/text()'))
        yield {
            'image': _first(item.xpath('.//img[@width="90"]/@src')),
            'title': _first(item.xpath('.//td/div/a/@title')),
            # the <p> text is "author / publisher / year / price"; keep the author part
            'author': author_text.split('/')[0] if author_text else None,
            'grade': _first(item.xpath('.//span[@class="rating_nums"]/text()')),
            'content': _first(item.xpath('.//span[@class="inq"]/text()')),
        }

def writeFile(content, path="./xpath_result.txt"):
    '''Append *content* to *path* as one JSON line (UTF-8, non-ASCII kept).

    :param content: any JSON-serializable object (here: one book dict)
    :param path: output file, defaults to the original hard-coded location
    '''
    # NOTE: original body was indented with a TAB; normalized to 4 spaces.
    with open(path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + "\n")

def main(offset):
    '''Crawl one result page (at the given pagination offset) and persist every parsed book.'''
    page_url = "https://book.douban.com/top250?start=%s" % offset
    html = getPage(page_url)
    # Guard clause: nothing to parse if the fetch failed.
    if not html:
        return
    for book in parsePage(html):
        print(book)
        writeFile(book)


if __name__ == '__main__':
    # Douban paginates 25 books per page; walk offsets 0, 25, ..., 225
    # and pause between requests to stay polite to the server.
    for start in range(0, 250, 25):
        main(offset=start)
        time.sleep(1)

