# 分页爬取豆瓣网图书Top250信息
import requests
import time
import json
from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery

# Append one book record to the output file as a single JSON line.
# Record fields: index/title/image/author/score/intro.
def writeFile(content, filename="data_douban_02.txt"):
    """Serialize *content* as JSON and append it as one line to *filename*.

    Args:
        content: JSON-serializable record (a dict of book fields).
        filename: target path; defaults to the original hard-coded file
            so existing callers are unaffected.
    """
    with open(filename, "a", encoding="utf-8") as f:
        # ensure_ascii=False keeps Chinese titles human-readable in the file
        f.write(json.dumps(content, ensure_ascii=False) + "\n")

# Fetch one page of the listing.
def getPageHtml(url):
    """Fetch *url* and return the response body as text, or None on any failure.

    Returns None both for non-200 status codes and for network-level errors
    (DNS failure, timeout, connection reset), so callers only need a
    truthiness check.
    """
    # A well-formed desktop Chrome UA. The original value had stray spaces
    # ("Mozilla / 5.0(Windows NT ...") which makes it a malformed UA string
    # that some servers reject or flag.
    header = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; WOW64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/63.0.3239.132 Safari/537.36"
        )
    }
    try:
        # timeout prevents the crawl loop from hanging forever on a stalled
        # connection; the original call had no timeout at all.
        res = requests.get(url, headers=header, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    except requests.RequestException:
        # Narrowed from a blanket `except Exception`: only swallow
        # request/transport errors, not programming errors.
        return None

# ========使用Xpath解析爬取的数据============

# Parse the crawled page with lxml/XPath.
def _first_or_default(values, default=""):
    """Return the first item of an XPath result list, or *default* if empty."""
    return values[0] if values else default


def parseHtmlByXpath(html):
    """Parse one Top250 page with XPath and yield one dict per book.

    Each yielded dict has keys index/title/author/score/image/intro;
    "index" is a placeholder 0 that the caller overwrites.

    Bug fixed vs. the original: when an XPath query matched nothing, the
    field was left as an empty *list* instead of a string — in particular a
    missing title made `title.replace(...)` raise AttributeError. All fields
    now default to "".
    """
    content = etree.HTML(html)
    # Each book row is a <tr class="item"> element.
    for book in content.xpath("//tr[@class='item']"):
        image = _first_or_default(book.xpath(".//td[1]/a/img/@src"))  # 图片
        title = _first_or_default(
            book.xpath(".//td[2]/div[@class='pl2']/a/text()")  # 标题
        ).replace("\n", "").strip(" ")
        author = _first_or_default(book.xpath(".//td[2]/p[@class='pl']/text()"))  # 作者
        score = _first_or_default(
            book.xpath(".//td[2]/div[@class='star clearfix']/span[@class='rating_nums']/text()")  # 评分
        )
        intro = _first_or_default(book.xpath(".//td[2]/p[@class='quote']/span/text()"))  # 简介
        yield {
            "index": 0,
            "title": title,
            "author": author,
            "score": score,
            "image": image,
            "intro": intro
        }
# Driver for the XPath variant: fetch one page, parse it, persist each record.
def main_xpath(start):
    """Crawl the Top250 page at offset *start* and store it via XPath parsing."""
    page_url = "https://book.douban.com/top250?start=" + str(start)
    page = getPageHtml(page_url)
    if not page:
        print("未爬取到信息")
        return
    # Ranking numbers continue from the page offset: start+1, start+2, ...
    for rank, record in enumerate(parseHtmlByXpath(page), start + 1):
        record["index"] = rank  # 序号
        print(record)
        writeFile(record)

# ========end使用Xpath解析爬取的数据============


# ========使用BeautifulSoup解析爬取的数据============

# Parse the crawled page with BeautifulSoup.
def parseHtmlByBeautifulSoup(html):
    """Parse one Top250 page with BeautifulSoup and yield one dict per book.

    Each dict carries index/title/author/score/image/intro; "index" is a
    placeholder 0 that the caller fills in.
    """
    soup = BeautifulSoup(html, 'lxml')

    def first_text(row, selector):
        # Text of the first element matching *selector*, or "" if none.
        hits = row.select(selector)
        return hits[0].get_text() if hits else ""

    # Each book lives in a <tr class="item"> row.
    for row in soup.find_all(name="tr", attrs={"class": "item"}):
        cover_tag = row.find(name="img", attrs={"width": "90"})  # 图片
        cover = cover_tag.attrs["src"] if cover_tag else ""
        name = first_text(row, "div.pl2 a").replace("\n", "").strip(" ")  # 标题
        writer = first_text(row, "p.pl")  # 作者
        rating = first_text(row, "div.star span.rating_nums")  # 评分
        quote = first_text(row, "p.quote span")  # 简介
        yield {
            "index": 0,
            "title": name,
            "author": writer,
            "score": rating,
            "image": cover,
            "intro": quote
        }

# Driver for the BeautifulSoup variant: fetch one page, parse it, persist each record.
def main_beautifulSoup(start):
    """Crawl the Top250 page at offset *start* and store it via BeautifulSoup parsing."""
    html = getPageHtml("https://book.douban.com/top250?start=" + str(start))
    if html:
        counter = start
        for record in parseHtmlByBeautifulSoup(html):
            counter += 1
            record["index"] = counter  # 序号
            print(record)
            writeFile(record)
    else:
        print("未爬取到信息")

# ========end使用BeautifulSoup解析爬取的数据============


# ========使用PyQuery解析爬取的数据============
# Parse the crawled page with PyQuery.
def parseHtmlByPyQuery(html):
    """Parse one Top250 page with PyQuery and yield one dict per book.

    Each dict carries index/title/author/score/image/intro; "index" is a
    placeholder 0 that the caller fills in.
    """
    doc = PyQuery(html)
    # Each book lives in a <tr class="item"> row.
    for entry in doc("tr.item").items():
        cover = entry.find("td img").attr("src")  # 图片
        name = entry.find("div.pl2 a").text().replace("\n", "").strip(" ")  # 标题
        writer_sel = entry.find("p.pl")  # 作者
        writer = writer_sel.text() if len(writer_sel) > 0 else ""
        rating_sel = entry.find("div.star span.rating_nums")  # 评分
        rating = rating_sel.text() if len(rating_sel) > 0 else ""
        quote_sel = entry.find("p.quote span")  # 简介
        quote = quote_sel.text() if len(quote_sel) > 0 else ""
        yield {
            "index": 0,
            "title": name,
            "author": writer,
            "score": rating,
            "image": cover,
            "intro": quote
        }
# Driver for the PyQuery variant: fetch one page, parse it, persist each record.
def main_PyQuery(start):
    """Crawl the Top250 page at offset *start* and store it via PyQuery parsing."""
    target = "https://book.douban.com/top250?start=" + str(start)
    body = getPageHtml(target)
    if not body:
        print("未爬取到信息")
        return
    # Ranking numbers continue from the page offset: start+1, start+2, ...
    for rank, record in enumerate(parseHtmlByPyQuery(body), start + 1):
        record["index"] = rank  # 序号
        print(record)
        writeFile(record)
# ========end使用PyQuery解析爬取的数据============

if __name__ == "__main__":
    # Top250 is paginated: 10 pages of 25 books, addressed by a `start` offset.
    # (Removed a dead `start = 0` assignment that was overwritten immediately.)
    for i in range(10):
        start = i * 25

        # Parse with XPath
        main_xpath(start)

        # Parse with BeautifulSoup
        # main_beautifulSoup(start)

        # Parse with PyQuery
        # main_PyQuery(start)

        # Throttle: one request per second to be polite to the server.
        time.sleep(1)