import time
from random import random

from bs4 import BeautifulSoup
import pandas as pd
from concurrent.futures import ThreadPoolExecutor

from tool import read_html

book_data = []


def get_suburl(url):
    """Fetch one listing page and return the detail-page URLs it links to.

    Parameters
    ----------
    url : str
        Absolute URL of a bookbase listing page.

    Returns
    -------
    list[str]
        Relative href of each book's detail page, one per table row
        (the header row is skipped).
    """
    resp = read_html(url)
    resp.encoding = 'gbk'  # site serves GBK-encoded pages
    soup = BeautifulSoup(resp.text, "html.parser")
    # Rows of the listing table; row 0 is the header, so drop it.
    rows = soup.find("table", class_="cytable")("tr")[1:]
    # The second <td> of each row holds the link to the book's page.
    return [row.find_all("td")[1].find("a").get("href") for row in rows]


def get_subhtml(urlls):
    """Fetch each book detail page and append its fields to ``book_data``.

    For every relative URL produced by :func:`get_suburl`, downloads the
    detail page, extracts 13 fields and appends them as one row to the
    module-level ``book_data`` list (CPython ``list.append`` is atomic,
    so concurrent calls from the thread pool are safe).

    Parameters
    ----------
    urlls : iterable[str]
        Relative detail-page URLs, e.g. ``"onebook.php?novelid=..."``.
    """

    def li_value(li):
        # Every metadata <li> reads "label：value" (full-width colon);
        # keep only the value part.
        return li.text.split("：")[1].strip()

    for sub_url in urlls:
        full_url = "https://www.jjwxc.net/" + sub_url
        resp = read_html(full_url)
        resp.encoding = "gbk"  # site serves GBK-encoded pages
        soup = BeautifulSoup(resp.text, "html.parser")

        # Main content block of the detail page.
        tbody = soup.find("tbody")
        # Title (trailing space in the page source, hence strip()).
        title = tbody.find("h1").text.strip()
        # Author is the first link in the content block.
        author = tbody.find("a").text
        # Total review count.
        viewCount = tbody.find("span", itemprop="reviewCount").text
        # Current bookmark/favorite count.
        collectedCount = tbody.find("span", itemprop="collectedCount").text
        # "Nutrient solution" (tip) count — 4th span of the centered div.
        SourceRanking = tbody.find("div", align="center").find_all("span")[3].text

        # Right-hand metadata column: a fixed-order list of <li> items.
        items = soup.find("div", class_="righttd").find_all("li")
        book_type = li_value(items[0])         # genre/category
        book_view = li_value(items[1])         # narrative perspective
        book_style = li_value(items[2])        # writing style
        # Series name; inner spaces removed to normalize the value.
        book_series = "".join(li_value(items[3]).split(" "))
        book_finish_case = li_value(items[4])  # serialization progress
        book_wordnum = li_value(items[5])      # total word count / length class
        # Publication status: icon titles joined with "-".
        book_publish = "-".join(img.get("title") for img in items[6].find_all("img"))
        author_status = li_value(items[7])     # author contract status

        book_data.append([title, author, viewCount, collectedCount,
                          SourceRanking, book_type, book_view, book_style,
                          book_series, book_finish_case, book_wordnum, book_publish, author_status])


def save_file(book_data):
    """Write the scraped rows to ``./file/book_data.csv`` (UTF-8).

    Parameters
    ----------
    book_data : list[list]
        Rows of 13 fields in the order produced by ``get_subhtml``.
    """
    import os

    # The original crashed with FileNotFoundError when ./file was absent.
    os.makedirs("./file", exist_ok=True)
    df = pd.DataFrame(book_data)
    df.columns = ["标题", "作者", "总书评数", "当前被收藏数",
                  "营养液数", "文章类型", "作品视角", "作品风格",
                  "所属系列", "文章进度", "全文字数", "出版情况", "签约状态"]
    # Fix: original path "book_datacsv" was missing the "." before the
    # extension, producing an extensionless file.
    df.to_csv("./file/book_data.csv", encoding="utf-8", index=False, mode="w")


def page_content(page):
    """Scrape one listing page: collect its book URLs, then each book's data.

    Parameters
    ----------
    page : int
        1-based index of the bookbase listing page.
    """
    # Example listing URL: https://www.jjwxc.net/bookbase.php?page=1
    page_url = f"https://www.jjwxc.net/bookbase.php?page={page}"
    print(page_url)
    get_subhtml(get_suburl(page_url))


def main(pages=10, workers=10):
    """Scrape the first *pages* listing pages concurrently, then save a CSV.

    Parameters
    ----------
    pages : int
        Number of listing pages to fetch (1..pages). Default 10 keeps the
        original behavior.
    workers : int
        Thread-pool size for concurrent page fetches.
    """
    # Exiting the `with` block joins every submitted task, so book_data
    # is fully populated before save_file runs.
    with ThreadPoolExecutor(workers) as pool:
        for page in range(1, pages + 1):
            pool.submit(page_content, page)
    save_file(book_data)
    print("over!!!")


if __name__ == '__main__':
    main()
