#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :categorys_book.py
# @Time      :2023/10/16 
# @Author    :CL
# @email     :1037654919@qq.com
from get_chapter_num import get_chapter_num
# 基于榜单下载书目
from utils import MongoDBUtil, mongo_manager
import requests
from bs4 import BeautifulSoup
import json
from multiprocessing import Pool
from get_new_cookies import get_new_skey

# Shared helper bound to the "wx_read" database, used for inserts below.
# NOTE(review): this rebinding shadows the imported MongoDBUtil *class* —
# confirm nothing later needs the class itself.
MongoDBUtil = MongoDBUtil(db_name="wx_read")
# Module-level handle to the "book_id" collection; get_categorys_book opens
# its own per-call handle as well — TODO confirm which one is intended.
book_id = mongo_manager("book_id", db="wx_read")
def get_page():
    """Fetch the HTML of the WeRead "rising" ranking page.

    Returns:
        str: raw response body; parse_category() feeds it to BeautifulSoup.
    """
    rank_url = "https://weread.qq.com/web/category/rising"
    browser_headers = {
        "authority": "weread.qq.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "max-age=0",
        "referer": "https://weread.qq.com/",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }
    # Hard-coded session cookies; wr_skey expires and must be refreshed
    # (see get_new_skey import at the top of the file).
    session_cookies = {
        "qq_domain_video_guid_verify": "b9f5e2164847a357",
        "pgv_pvid": "3380599040",
        "fqm_pvqid": "bbafb710-8487-4c50-897c-0c9a5a43eddb",
        "RK": "AT2N+qJJSD",
        "ptcz": "c0dba204eb3ed47316a75aa89b318ce1ebfd84aa70c620a582419ea7bb6e090e",
        "tmeLoginType": "2",
        "psrf_qqopenid": "07B848438A1DF5D6FBFA931D2CDFD036",
        "psrf_qqunionid": "73F2F591EF6BBB07004184D7E0024E86",
        "psrf_qqrefresh_token": "EFB86C3BB6541BFB0424C5215F1D2A90",
        "psrf_access_token_expiresAt": "1698199558",
        "euin": "oKni7iCk7eE5Nv**",
        "wxrefresh_token": "",
        "uin": "1037654919",
        "psrf_qqaccess_token": "AF150CB8C6BAD8AC4C84D6C206FB2D68",
        "wxopenid": "",
        "wxunionid": "",
        "wr_fp": "1600462502",
        "wr_gid": "247667256",
        "wr_vid": "342130397",
        "wr_pf": "0",
        "wr_rt": "web%40eaR9jPKOLUmUBhJ3YT8_AL",
        "wr_localvid": "dae32b70814647edddae820",
        "wr_name": "%E8%90%A7%E7%9F%B3%E5%AD%90",
        "wr_avatar": "https%3A%2F%2Fres.weread.qq.com%2Fwravatar%2FWV0025-~CWxfQNwWz94YXURCFRNud4%2F0",
        "wr_gender": "1",
        "wr_theme": "white",
        "wr_skey": "TtltSD0r"
    }
    resp = requests.get(rank_url, headers=browser_headers, cookies=session_cookies)
    print(resp)
    return resp.text



def get_categorys_book(category):
    """Download the book list for one ranking category and persist it to MongoDB.

    Args:
        category: dict with 'name' and 'href', e.g.
            {'name': '飙升榜', 'href': 'https://weread.qq.com/web/category/rising'}

    Fetches one page (20 entries) of the category listing, enriches each book
    with chapter info, and writes each record into the 'book_id' collection
    (insert, falling back to update on duplicates).

    NOTE(review): the loop is shaped for pagination (hasMore / maxIndex) but an
    unconditional break exits after the first page — kept to preserve current
    behavior; remove the final break to follow hasMore across pages.
    """
    max_index = 0
    # One connection for the whole call (the original reopened it per pass and
    # leaked it on the hasMore == 0 path); close it unconditionally below.
    collection = mongo_manager("book_id", db="wx_read")
    try:
        while True:
            payload = json.loads(get_book_list(category, max_index))
            is_dict = isinstance(payload, dict)
            has_more = payload.get("hasMore", 0) if is_dict else 0
            books = payload.get("books", []) if is_dict else []
            for book in books:
                record = parse_book_info(book)
                record["_id"] = record["bookId"]
                record["category"] = category
                try:
                    record["chapterInfos"] = get_chapter_num(record["bookId"])
                    # Keep only the UIDs of free chapters (price <= 0).
                    record["chapter_num"] = [
                        chapter["chapterUid"]
                        for chapter in record["chapterInfos"]
                        if chapter["price"] <= 0
                    ]
                except Exception as e:
                    # Best effort: store the book even without chapter data.
                    print(e)
                try:
                    MongoDBUtil.insert_one('book_id', record)
                except Exception:
                    # Likely a duplicate _id; the update below refreshes it.
                    pass
                finally:
                    collection.updateOne({'_id': record['_id']}, record)
            max_index += 20
            if has_more == 0:
                break
            # Only the first page is fetched; drop this to paginate fully.
            break
    finally:
        collection.close()


def get_book_list(category, maxIndex=0):
    """Fetch one page of the book-list JSON for a ranking category.

    Args:
        category: dict with an 'href' whose last path segment identifies the
            category (a numeric id, or a word like 'rising' for ranking pages).
        maxIndex: pagination offset; the API returns 20 books per page.

    Returns:
        str: raw JSON response text (caller json.loads it).
    """
    name = category['href'].split('/')[-1]
    headers = {
        "authority": "weread.qq.com",
        "accept": "application/json, text/plain, */*",
        "accept-language": "zh-CN,zh;q=0.9",
        "if-none-match": "W/\"7e98-60WsfjBQVbK4xy0VB+qElww3Ls4\"",
        "referer": "https://weread.qq.com/web/category/rising",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }
    # Hard-coded session cookies; wr_skey expires and must be refreshed.
    cookies = {
        "qq_domain_video_guid_verify": "b9f5e2164847a357",
        "pgv_pvid": "3380599040",
        "fqm_pvqid": "bbafb710-8487-4c50-897c-0c9a5a43eddb",
        "RK": "AT2N+qJJSD",
        "ptcz": "c0dba204eb3ed47316a75aa89b318ce1ebfd84aa70c620a582419ea7bb6e090e",
        "tmeLoginType": "2",
        "psrf_qqopenid": "07B848438A1DF5D6FBFA931D2CDFD036",
        "psrf_qqunionid": "73F2F591EF6BBB07004184D7E0024E86",
        "psrf_qqrefresh_token": "EFB86C3BB6541BFB0424C5215F1D2A90",
        "psrf_access_token_expiresAt": "1698199558",
        "euin": "oKni7iCk7eE5Nv**",
        "wxrefresh_token": "",
        "uin": "1037654919",
        "psrf_qqaccess_token": "AF150CB8C6BAD8AC4C84D6C206FB2D68",
        "wxopenid": "",
        "wxunionid": "",
        "wr_fp": "1600462502",
        "wr_gid": "247667256",
        "wr_vid": "342130397",
        "wr_pf": "0",
        "wr_rt": "web%40eaR9jPKOLUmUBhJ3YT8_AL",
        "wr_localvid": "dae32b70814647edddae820",
        "wr_name": "%E8%90%A7%E7%9F%B3%E5%AD%90",
        "wr_avatar": "https%3A%2F%2Fres.weread.qq.com%2Fwravatar%2FWV0025-~CWxfQNwWz94YXURCFRNud4%2F0",
        "wr_gender": "1",
        "wr_theme": "white",
        "wr_skey": "TtltSD0r"
    }
    url = f"https://weread.qq.com/web/bookListInCategory/{name}"
    print(url)
    params = {
        "maxIndex": maxIndex,
    }
    # Numeric segments are plain category ids; non-numeric ones (e.g. "rising")
    # are ranking pages and need rank=1 to return results.
    try:
        int(name)
    except ValueError:
        params["rank"] = "1"
    response = requests.get(url, headers=headers, cookies=cookies, params=params)
    return response.text


def parse_book_info(book):
    """Flatten one raw book entry from the category API into a storable dict.

    Args:
        book: raw entry from the bookListInCategory response; must contain a
            'bookInfo' sub-dict with the metadata fields listed below
            (KeyError propagates if a required field is missing).

    Returns:
        dict: flat record with reading stats, book metadata and ids.
    """
    info = book['bookInfo']
    result = {
        # Optional top-level stats: default to benign values when absent.
        'readingCount': book.get('readingCount', 0),
        'hints': book.get('hints', ""),
    }
    # Required metadata: a missing key is a schema change worth surfacing.
    for key in ('intro', 'bookStatus', 'category', 'publishTime',
                'newRatingDetail', 'author', 'bookId', 'title',
                'cover', 'centPrice'):
        result[key] = info[key]
    # Optional translator segmentation; None when absent.
    result['translatorSeg'] = book.get('translatorSeg')
    return result
def parse_category(html=None):
    """Scrape the ranking page and return the list of category links.

    Args:
        html: optional pre-fetched page HTML; when None (the default, matching
            the original behavior) the page is fetched via get_page().

    Returns:
        list[dict]: one {'name': ..., 'href': absolute-url} per ranking entry.
    """
    soup = BeautifulSoup(get_page() if html is None else html, 'html.parser')
    items = soup.find('ul', class_='ranking_list').find_all('li')
    categorys = []
    for item in items:
        # Look the anchor up once (the original repeated the find three times).
        link = item.find('a', class_='ranking_list_item_link')
        span = link.find('span')
        # Some entries wrap the name in a <span>, others put it on the <a>.
        name = span.text if span is not None else link.text
        categorys.append({'name': name,
                          'href': 'https://weread.qq.com' + link.get('href')})
    return categorys

# 主函数 根据榜单下载书目
# Entry point: download book lists for every ranking category.
def main():
    """Scrape the category list, then fan the downloads out over 30 workers."""
    categorys = parse_category()
    print(categorys)

    worker_pool = Pool(processes=30)
    worker_pool.map(get_categorys_book, categorys)
    # Stop accepting new tasks, then wait for all workers to finish.
    worker_pool.close()
    worker_pool.join()