# coroutine version   time: 162.68639540672302 s, 19198 rows of data
# multithread version time: 159.52501821517944 s, 19907 rows of data
import asyncio
import os
import time
from concurrent.futures import ThreadPoolExecutor

import pandas as pd
from bs4 import BeautifulSoup

from tool import read_html

book_comment_data = []


# Fetch the sub-page URLs; returns a list
def get_suburl(url):
    suburlls = []
    resp = read_html(url)
    resp.encoding = 'gbk'
    bs = BeautifulSoup(resp.text, "html.parser")
    subtrls = bs.find("tbody")("tr")[1:]
    for tr in subtrls:
        td = tr.find_all("td")[1]
        url = td.find("a").get("href")
        suburlls.append(url)
    return suburlls


'''
Multithreaded alternative:
def get_subhtml_all(urlls):
    with ThreadPoolExecutor(20) as t:
        for url in urlls:
            t.submit(get_subhtml,url)
'''


async def get_subhtml_all(urlls):
    tasks = []
    for url in urlls:
        task = get_subhtml(url)
        tasks.append(asyncio.create_task(task))
    await asyncio.wait(tasks)


# https://my.jjwxc.net/novelreview.php?novelid=3419133&action=getReviewList&params%5Bpage%5D=1
# https://my.jjwxc.net/novelreview.php?novelid=3419133&action=getReviewList&params%5Bpage%5D=2
# https://my.jjwxc.net/novelreview.php?callback=jQuery111207866898938517757_1668298430661&novelid=3419133&action=getReviewList&params%5Bscore%5D=0&params%5Bpage%5D=1&_=1668298430662
# https://my.jjwxc.net/novelreview.php?callback=jQuery111207866898938517757_1668298430657&novelid=3419133&action=getReviewList&params%5Bscore%5D=0&params%5Bpage%5D=2&_=1668298430663

# Fetch a single book's data
async def get_subhtml(url):
    # https://www.jjwxc.net/onebook.php?novelid=3419133
    novelid = url.split("novelid=")[1]
    url_detail = f"https://www.jjwxc.net/onebook.php?novelid={novelid}"
    resp_detail = read_html(url_detail)
    resp_detail.encoding = 'gbk'
    book_title = BeautifulSoup(resp_detail.text, "html.parser").find('tbody').find("h1").text.strip()
    page = 1  # 获取第一页数据，如有需要可以循环和修改
    url_comment = f"https://my.jjwxc.net/novelreview.php?novelid={novelid}&action=getReviewList&params%5Bpage%5D={page}"
    resp_comment = read_html(url_comment)
    # 可不写，默认和 pycharm 一样 UTF-8
    resp_comment.encoding = "utf-8"
    webpage_content = resp_comment.json()
    data_list = webpage_content['data']['list']
    # print(len(data_list))
    # "书名","网友","所评章节","书评"
    # 爬取每本书前20条评论
    for i in range(20):
        if i < len(data_list):
            book_comment_data.append(
                [book_title, data_list[i]['nickname'], data_list[i]['dateline'], data_list[i]['commentbody']])


def save_file(book_data):
    df = pd.DataFrame(book_data)
    df.columns = ["书名", "网友", "所评章节", "书评"]
    # df.to_csv("./file/book_comment.csv",encoding="utf-8",index=False,mode="w")
    df.to_excel("./file/book.xlsx", sheet_name='comment', index=False)


# Extract one listing page's data and store it in the shared list
def page_content(page):
    # 需要读取的网页URL地址
    url = f"https://www.jjwxc.net/bookbase.php?page={page}"
    # 控制台输出URL地址
    print(url)
    # 得到子页面所有的URL地址
    urlls = get_suburl(url)
    # 得到子页面所需内容并存入临时列表
    asyncio.run(get_subhtml_all(urlls))


def main():
    t1 = time.time()
    # 开启多线程 10个
    with ThreadPoolExecutor(10) as t:
        for count in range(1, 11):
            t.submit(page_content, count)
    # 将列表中的数据保存到文件中
    # print(book_comment_data)
    save_file(book_comment_data)
    t2 = time.time()
    # 完成提示
    print("over!!!")
    print("time：", t2 - t1)


if __name__ == '__main__':
    main()
