from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import json
import time
import requests


def getPage(url, timeout=10):
    """Fetch *url* and return its HTML decoded as UTF-8.

    Args:
        url: Page URL to download.
        timeout: Seconds to wait for the server before giving up
            (new keyword, defaults to 10 — previously the request
            could hang forever on a stalled connection).

    Returns:
        The decoded page body on HTTP 200, otherwise ``None``
        (including on any network error).
    """
    # Browser-like headers (incl. a session Cookie) so douban serves the
    # normal page instead of blocking the crawler.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,fr;q=0.7',
        'Connection': 'keep-alive',
        'Cookie': 'bid=FOENh9fYqr4; douban-fav-remind=1; __utmz=30149280.1547116978.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ses.100001.3ac3=*; ap_v=0,6.0; __utma=30149280.1317406171.1547116978.1547116978.1550820261.2; __utmc=30149280; __utma=81379588.1936403228.1550820261.1550820261.1550820261.1; __utmc=81379588; __utmz=81379588.1550820261.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); viewed="3646172"; gr_user_id=0b3f1da2-99b1-43fd-b2c1-865b6c4bd3a2; gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03=98641891-50ad-4e87-a4df-ab5c5d7b9bf1; gr_cs1_98641891-50ad-4e87-a4df-ab5c5d7b9bf1=user_id%3A0; _vwo_uuid_v2=D1BE836836B33C9FA0E91CACFABA56926|2d3f5460c6c5756f777b50dc4f6d9439; gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03_98641891-50ad-4e87-a4df-ab5c5d7b9bf1=true; __utmt_douban=1; __utmt=1; ct=y; _pk_id.100001.3ac3=41f107c00b673f32.1550820261.1.1550821282.1550820261.; __utmb=30149280.10.10.1550820261; __utmb=81379588.10.10.1550820261',
        'Host': 'book.douban.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
    }
    try:
        # Bug fix: the original sent `data={}` as a request *body* with a
        # GET request — dropped.  `timeout` keeps the crawler from hanging.
        res = requests.get(url, headers=headers, timeout=timeout)
    except RequestException:
        return None
    if res.status_code == 200:
        return res.content.decode('utf-8')
    return None


def parsePage(content):
    """Parse one Top-250 listing page and yield a dict per book.

    Each yielded dict has ``title`` (link title attribute), ``detail``
    (the author/publisher info line) and ``score`` (rating text).
    """
    # bs4 needs an explicit parser; lxml is the fast one.
    soup = BeautifulSoup(content, "lxml")
    # Every book on the listing page sits in its own full-width <table>.
    for book in soup.find_all(name='table', attrs={'width': '100%'}):
        title_link = book.find(name='div', attrs={'class': "pl2"}).find('a')
        record = {
            'title': title_link.attrs['title'],
            'detail': book.find(name='p').get_text(),
            'score': book.find(name='span', attrs={'class': "rating_nums"}).get_text(),
        }
        yield record


def writeFile(content):
    """Append *content* to the output file as one JSON line.

    ``ensure_ascii=False`` keeps Chinese text readable in the file
    instead of being escaped to \\uXXXX sequences.
    """
    line = json.dumps(content, ensure_ascii=False)
    with open("./豆瓣bs4.txt", 'a', encoding="utf-8") as out:
        out.write(line + "\n")


def main(page):
    """Crawl one listing page (0-based) and persist every book found."""
    # Douban paginates 25 books per page via the `start` query parameter.
    url = f'https://book.douban.com/top250?start={25 * page}'
    html = getPage(url)
    if not html:  # download failed — nothing to parse
        return
    for book in parsePage(html):
        print(book)
        writeFile(book)


if __name__ == '__main__':
    # Top 250 at 25 books per page = exactly 10 pages (start=0..225).
    # The original `range(0, 11)` requested an 11th, empty page (start=250).
    for page in range(10):
        main(page)
        time.sleep(1)  # be polite: pause between requests to avoid a ban
