import requests
import csv
from urllib.parse import urlencode
from pyquery import PyQuery as pq

# Base Ajax endpoint for the "1000000" category book list; swap in the URL
# of the matching Ajax request when targeting a different category.
base_url = 'https://weread.qq.com/web/bookListInCategory/1000000?'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
}  # Only a browser-like User-Agent is needed for this endpoint

# Persist one record to the CSV export.
def write_to_file(content):
    """Append a single book record (dict) to growthbook.csv.

    Opens in 'a' (append) mode so successive calls accumulate rows, and
    uses the 'utf_8_sig' encoding so the exported CSV opens in Excel
    without mojibake. Fix: the original never emitted a header row; the
    header is now written once, when the file is newly created/empty.

    Args:
        content: dict whose keys match ``fieldnames`` below.
    """
    fieldnames = ['searchIdx', 'title', 'author', 'star', 'lPushName', 'intro', 'cover']
    with open('growthbook.csv', 'a', encoding='utf_8_sig', newline='') as f:
        w = csv.DictWriter(f, fieldnames=fieldnames)
        # f.tell() == 0 means the file was just created (or is empty),
        # so the column header has not been written yet.
        if f.tell() == 0:
            w.writeheader()
        w.writerow(content)

# Fetch one page of the listing.
def get_page(page):
    """Fetch one page of the category listing and return it as JSON.

    The Ajax endpoint pages via ``maxIndex``; each page holds 20 books.

    Args:
        page: zero-based page number.

    Returns:
        The decoded JSON payload (dict) on HTTP 200, otherwise None
        (non-200 status or any request failure).
    """
    params = {
        'maxIndex': page * 20
    }
    url = base_url + urlencode(params)
    try:
        # timeout keeps a dead/slow endpoint from hanging the crawl forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.json()  # parse the response body as JSON
    except requests.RequestException as e:
        # RequestException is the base class: also covers read timeouts and
        # other request-level failures, not just ConnectionError
        print('Error', e.args)

# Extract the fields we keep from one page of JSON.
def parse_page(json):
    """Yield one flat record per book in a page's JSON payload.

    Each yielded dict carries the page's ranking index ('searchIdx') plus
    selected 'bookInfo' fields; every record is also appended to
    growthbook.csv via write_to_file as a side effect.

    Fixes: the original raised TypeError when the payload had no 'books'
    key (iterating None) and AttributeError when an item lacked
    'bookInfo'; both are now guarded, yielding nothing / partial records
    instead of crashing.

    Args:
        json: decoded payload from get_page, or None on fetch failure.
    """
    if not json:
        return
    for item in json.get('books') or []:  # 'books' may be absent or None
        info = item.get('bookInfo') or {}  # guard: bookInfo may be missing
        growth = {
            'searchIdx': item.get('searchIdx'),
            'title': info.get('title'),
            'author': info.get('author'),
            'star': info.get('star'),
            'lPushName': info.get('lPushName'),
            'intro': info.get('intro'),
            'cover': info.get('cover'),
        }
        write_to_file(growth)
        yield growth


if __name__ == '__main__':
    # Guarding the entry point lets other modules import the helpers above
    # without triggering the crawl.
    for page_index in range(5):
        payload = get_page(page_index)
        for record in parse_page(payload):
            print(record)