from time import sleep

import requests
from fontTools.ttLib import TTFont
from lxml import etree
import re
import pandas as pd

# HTTP headers: present a desktop Edge user-agent so the site serves normal pages.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69'
}

# Glyph-name -> digit lookup for Qidian's anti-spider font: the font's cmap
# maps obfuscated codepoints to English number words; this maps those words
# back to the digit characters they render as.
num_dict = {
    word: str(digit)
    for digit, word in enumerate(
        ['zero', 'one', 'two', 'three', 'four',
         'five', 'six', 'seven', 'eight', 'nine'])
}


def main():
    """Entry point: gather category URLs, scrape each ranking page, save to CSV."""
    category_urls = crawling_url_list()
    save_data(crawling_data(category_urls))


def crawling_url_list():
    """Fetch the monthly-ticket ranking landing page and build per-category URLs.

    Returns:
        list[str]: one ranking base URL per scraped ``data-chanid`` channel id.
    """
    url = 'https://www.qidian.com/rank/yuepiao/year2023-month11-page1/'
    # timeout keeps the script from hanging indefinitely on a stalled connection
    response = requests.get(url, headers=header, timeout=10)
    # fail fast on HTTP errors instead of parsing an error page
    response.raise_for_status()
    html_doc = etree.HTML(response.text)
    data_chanid = html_doc.xpath('//*[@id="-1"]/div[1]/div[6]/div[2]/div[1]/div/div/div[2]/p//a/@data-chanid')
    # data_chanid[0] is skipped — presumably the aggregate "all categories"
    # entry; TODO confirm against the page markup
    return [f'https://www.qidian.com/rank/yuepiao/chn{i}/' for i in data_chanid[1:]]


def crawling_iconfont(woff_url):
    """Download the anti-spider .woff font and save it for later decoding.

    Args:
        woff_url: absolute URL of the font file embedded in the page's CSS.
    """
    # timeout keeps the script from hanging indefinitely on a stalled connection
    woff_resp = requests.get(woff_url, headers=header, timeout=10)
    woff_resp.raise_for_status()
    # 'with' flushes and closes the file on exit — the original explicit
    # flush() was redundant
    with open("../static/font/qi_dian.woff", "wb") as font_file:
        font_file.write(woff_resp.content)


# 定义一个函数，用于获取url后缀
def url_suffix(url_list):
    # 定义一个字符串，用于拼接url后缀
    postfix = 'year2023-month11-page'
    # 定义一个空列表，用于存放拼接后的url
    new_url_list = []
    # 遍历url列表
    for url in url_list:
        # 将拼接后的url添加到新列表中
        new_url_list += ([url + postfix + f'{i}' for i in range(1, 6)])
    # 返回拼接后的url列表
    return new_url_list


# 定义一个函数decode_month_ticket，用于解码月份票
def decode_month_ticket(month_ticket):
    # 加载字体文件
    qi_dian_font = TTFont('../static/font/qi_dian.woff')
    # 获取字体文件的字符映射
    code_dict = qi_dian_font.getBestCmap()
    # 获取月份票中的字符
    code_list = [t.encode('unicode_escape').decode()[2:] for t in month_ticket[0]]
    # 将字符转换为数字
    ticket = ''.join([num_dict[code_dict[int(code, 16)]] for code in code_list])
    # 返回月份票的数字
    return [int(ticket)]


def crawling_data(urls):
    """Scrape book rows from every paginated ranking URL.

    Args:
        urls: category ranking base URLs (page suffixes appended via url_suffix).

    Returns:
        list[list]: one row per book — title, author/category/status texts,
        introduction, and the decoded month-ticket count.
    """
    # Compiled once, outside the loop. Raw string: the original non-raw
    # literal contained invalid escape sequences such as '\(' (SyntaxWarning
    # on Python 3.12+); the dot before 'woff' is now escaped so only literal
    # '.woff' URLs match.
    font_url_re = re.compile(
        r".*?format\('eot'\).*?src: url\('(https://qidian.gtimg.com/qd_anti_spider/.*?\.woff)'\) format\('woff'\).*?",
        re.S)
    data = []
    for url in url_suffix(urls):
        print(url)
        response = requests.get(url, headers=header, timeout=10)
        html_doc = etree.HTML(response.text)
        li = html_doc.xpath('//*[@id="book-img-text"]//li')
        if not li:
            # empty list => page layout changed or page out of range; skip
            continue
        # The font URL lives in an inline <style> on the first list item;
        # every item on the page uses the same font.
        iconfont_str = li[0].xpath('./div[@class="book-right-info"]/div[1]/p/span/style/text()')[0]
        iconfont_url = font_url_re.findall(iconfont_str)[0]
        crawling_iconfont(iconfont_url)
        for item in li:
            title = item.xpath('./div[@class="book-mid-info"]/h2/a/text()')
            author = item.xpath(
                './div[@class="book-mid-info"]/p[1]//a/text()|div[@class="book-mid-info"]/p[1]//span/text()')
            introduce = item.xpath('./div[@class="book-mid-info"]/p[2]/text()')
            month_ticket = item.xpath('./div[@class="book-right-info"]/div[1]/p/span/span/text()')
            data.append(title + author + introduce + decode_month_ticket(month_ticket))
        # be polite to the server between page requests
        sleep(1)
    return data


def save_data(data):
    """Persist scraped rows to CSV with a UTF-8 BOM (Excel-friendly)."""
    columns = ['title', 'author', 'category', 'secondary_category',
               'status', 'introduce', 'month_ticket']
    frame = pd.DataFrame(data, columns=columns)
    frame.to_csv('../static/data/qi_dian_book.csv', index=False, encoding='utf_8_sig')


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
