#使用Xpath,BeautifulSoup,PyQuery分页爬取豆瓣图书Top250信息,并将信息写入文件中
import requests,time,json
from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery

def getPage(offset):
    '''Fetch one page of the Douban Top 250 book list.

    :param offset: pagination offset (0, 25, 50, ...) sent as the
        ``start`` query parameter.
    :return: page HTML text on HTTP 200, or None on any non-200 status
        or request failure.
    '''
    url = "https://book.douban.com/top250"
    data = {'start': str(offset)}
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    try:
        # timeout keeps the crawler from hanging forever on a stalled
        # connection; a timeout raises RequestException and returns None below
        res = requests.get(url, params=data, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    except RequestException:
        return None

def parsePage(content):
    '''Parse one Top-250 page and yield a dict per book.

    Each dict carries 书名/作者/评分/图片 and, only when the book has a
    one-line quote on the page, an extra 评价 key. Two alternative
    parser implementations (XPath and BeautifulSoup) are kept below,
    disabled, for reference.
    '''
    #===========XPath version (disabled, kept for reference)==================
    '''
    html = etree.HTML(content)
    items = html.xpath("//tr[@class='item']")
    for item in items:
        if item.xpath(".//span[@class='inq']/text()"):
            yield{               
                '书名':item.xpath(".//div[@class='pl2']/a/text()")[0].strip(),
                '作者':item.xpath(".//p[@class='pl']/text()")[0].split('/')[0].strip(),
                '评分':item.xpath(".//span[@class='rating_nums']/text()")[0],
                '评价':item.xpath(".//span[@class='inq']/text()")[0],
                '图片':item.xpath(".//img/@src")[0],
            }
        else:
            yield{         
                '书名':item.xpath(".//div[@class='pl2']/a/text()")[0].strip(),
                '作者':item.xpath(".//p[@class='pl']/text()")[0].split('/')[0].strip(),
                '评分':item.xpath(".//span[@class='rating_nums']/text()")[0],
                '图片':item.xpath(".//img/@src")[0],
            }   
    '''
    #=========BeautifulSoup version (disabled, kept for reference)============
    '''
    soup = BeautifulSoup(content,'lxml')
    items = soup.find_all(name='tr',attrs={'class':'item'})
    for item in items:
        if item.find(name='span',attrs={'class':'inq'}):
            yield{               
                '书名':item.select("div.pl2 a")[0].get_text().strip(),
                '作者':item.find(name='p',attrs={'class':'pl'}).string.split('/')[0].strip(),
                '评分':item.find(name='span',attrs={'class':'rating_nums'}).string,
                '评价':item.find(name='span',attrs={'class':'inq'}).string,
                '图片':item.find(name='img',attrs={'width':'90'}).attrs['src'],
            }
        else:
            yield{         
                '书名':item.select("div.pl2 a")[0].get_text().strip(),
                '作者':item.find(name='p',attrs={'class':'pl'}).string.split('/')[0].strip(),
                '评分':item.find(name='span',attrs={'class':'rating_nums'}).string,
                '图片':item.find(name='img',attrs={'width':'90'}).attrs['src'],
            }        
    '''
    #========Active version: PyQuery============
    doc = PyQuery(content)
    for item in doc("tr.item").items():
        # build the common fields once instead of duplicating the whole
        # dict in an if/else just to include or drop one key
        book = {
            '书名': item("div.pl2 a").text(),
            '作者': item("p.pl").text().split('/')[0].strip(),
            '评分': item("span.rating_nums").text(),
        }
        quote = item("span.inq").text()
        if quote:
            # only books that show a one-line quote get the 评价 key,
            # matching the original if/else key layout and order
            book['评价'] = quote
        book['图片'] = item("img").attr('src')
        yield book


def writeFile(content):
    '''Append one parsed record to ./result.txt as a single JSON line.

    :param content: a JSON-serializable dict for one book.
    '''
    # ensure_ascii=False keeps the Chinese keys/values human-readable
    serialized = json.dumps(content, ensure_ascii=False)
    with open('./result.txt', 'a', encoding='utf-8') as out:
        out.write(serialized + '\n')

def main(offset):
    '''Crawl one result page at the given offset; print and persist each book.

    :param offset: pagination offset forwarded to getPage.
    '''
    content = getPage(offset)
    # getPage returns None on failure; bail out before parsing.
    # (The original tested the generator returned by parsePage, which is
    # always truthy, so None content crashed inside the parser instead.)
    if content is None:
        return
    for item in parsePage(content):
        print(item)
        writeFile(item)

if __name__ == "__main__":
    
    for i in range(10):
        main(offset=i*25)
        time.sleep(1)
    
    #main(0)




































