from pyquery import PyQuery
from requests.exceptions import RequestException
import requests
import time,json

def getPage(url, timeout=10):
    """Fetch a page and return its HTML text.

    Args:
        url: Absolute URL to request.
        timeout: Seconds to wait for the server before aborting; without it
            ``requests.get`` can block forever on a stalled connection.

    Returns:
        The response body as text when the server answers HTTP 200,
        otherwise ``None`` (non-200 status or any request failure).
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5383.400 QQBrowser/10.0.1313.400'}
    try:
        # Only the network call can raise RequestException; keep the try minimal.
        data = requests.get(url, headers=headers, timeout=timeout)
    except RequestException as err:
        # Best-effort crawler: report the failure and let the caller skip the page.
        print(err)
        return None
    if data.status_code == 200:
        return data.text
    return None

def parseData(content):
    """Parse one Douban Top-250 HTML page and yield a dict per book.

    Each yielded dict has the keys ``name``, ``url``, ``des`` (the short
    one-line blurb) and ``img`` (cover image URL).
    """
    page = PyQuery(content)
    for item in page('tr.item').items():
        title_link = item.find('div.pl2 a')
        yield {
            'name': title_link.text(),
            'url': title_link.attr('href'),
            'des': item.find('span.inq').text(),
            'img': item.find('img').attr('src'),
        }

def saveFile(content):
    """Append *content* to ./douban.txt as one JSON record plus a blank line.

    ``ensure_ascii=False`` keeps the Chinese text human-readable in the file.
    """
    record = json.dumps(content, ensure_ascii=False)
    with open('./douban.txt', 'a', encoding="utf-8") as out:
        out.write(record + '\n\n')

def start(offset):
    """Crawler entry point: fetch one result page and persist every book on it.

    The original code left the string '''爬虫入口''' (crawler entry point) as a
    dead statement at the END of the function; a docstring must be the first
    statement, so it is moved here.

    Args:
        offset: Zero-based index of the first entry on the page
            (Douban pages step in multiples of 25).
    """
    url = "https://book.douban.com/top250?start=" + str(offset)
    html = getPage(url)
    if html:
        # 'book' instead of 'dict' — the old name shadowed the builtin type.
        for book in parseData(html):
            saveFile(book)


if __name__ == '__main__':
    # Crawl the 10 pages of the Top-250 (offsets 0, 25, ..., 225),
    # pausing one second between requests to stay polite to the server.
    for offset in range(0, 250, 25):
        start(offset=offset)
        time.sleep(1)
