# 在文件顶部导入部分添加以下代码
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
from bs4 import BeautifulSoup
import requests
from datetime import datetime
from config import get_headers, PROXIES
import matplotlib as mpl
import matplotlib.font_manager as fm
import re  # 添加正则表达式模块
import time  # 确保time模块已导入

# Chinese font setup for Windows so matplotlib can render CJK chart labels
font_path = 'C:/Windows/Fonts/simhei.ttf'  # SimHei
# Alternatively use one of the following fonts
# font_path = 'C:/Windows/Fonts/msyh.ttc'  # Microsoft YaHei
# font_path = 'C:/Windows/Fonts/simsun.ttc'  # SimSun

# Only configure the font if the file actually exists on this machine
if os.path.exists(font_path):
    # FontProperties handle used later for chart titles/axis labels
    font_prop = fm.FontProperties(fname=font_path)
    # Make SimHei the global sans-serif font
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts
else:
    print(f"警告：找不到字体文件 {font_path}，图表中文可能无法正确显示")

mpl.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'KaiTi', 'FangSong']  # preferred CJK fonts, in fallback order
mpl.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts
mpl.rcParams['font.family'] = 'sans-serif'  # use sans-serif family globally
import random

# Default paths; fetch_qidian_rank()/analyze_and_plot()/generate_report()
# overwrite these globals with dated, per-category paths at runtime.
DATA_PATH = 'data/qidian_rank.csv'
FIGURE_DIR = 'output/figures/'
REPORT_PATH = 'output/2023-12-report.html'

def get_today_str():
    """Return today's local date as a compact YYYYMMDD string."""
    now = datetime.now()
    return now.strftime('%Y%m%d')

def get_category_from_url(url):
    """Map a Qidian rank URL to its category name by channel id.

    Two-digit channel ids ('chn21', 'chn22') must be tested before the
    one-digit 'chn2', because 'chn22' contains 'chn2' as a substring —
    the original check order misclassified chn22 (仙侠) as 武侠.
    Returns '综合' when no known channel id is present.
    """
    if 'chn21' in url:
        return '玄幻'
    elif 'chn22' in url:
        return '仙侠'
    elif 'chn1' in url:
        return '奇幻'
    elif 'chn2' in url:
        return '武侠'
    else:
        return '综合'

# 1. 数据采集

def fetch_qidian_rank():
    """Scrape the Qidian monthly-ticket rank page and save the rows to CSV.

    Side effects: creates data/ if missing, rebinds the module-level
    DATA_PATH global to a dated per-category filename, and writes the CSV
    there. Returns early (instead of the original quit()) when all retries
    fail. When parsing yields no rows, the raw page is dumped to
    debug_page.html for offline selector debugging.
    """
    os.makedirs('data', exist_ok=True)
    # Other rank pages, kept for quick switching:
    # url = 'https://www.qidian.com/rank/yuepiao/'        # 总榜
    # url = 'https://www.qidian.com/rank/yuepiao/chn21/'  # 玄幻
    # url = 'https://www.qidian.com/rank/yuepiao/chn1/'   # 奇幻
    # url = 'https://www.qidian.com/rank/yuepiao/chn2/'   # 武侠
    url = 'https://www.qidian.com/rank/yuepiao/chn22/'  # 仙侠

    today_str = get_today_str()
    category = get_category_from_url(url)
    global DATA_PATH
    DATA_PATH = f'data/qidian_rank_{today_str}_{category}.csv'

    # Retry loop: the site answers 202 as a soft anti-bot block, so each
    # attempt re-generates headers via get_headers() and waits a random
    # interval. Network errors also count as a retry instead of crashing.
    max_retries = 3
    retry_count = 0
    resp = None
    while retry_count < max_retries:
        try:
            resp = requests.get(url, headers=get_headers(), proxies=PROXIES, timeout=10)
        except requests.RequestException as e:
            print(f"请求异常: {e}")
            retry_count += 1
            time.sleep(random.uniform(1, 3))
            continue
        print(f"页面状态码: {resp.status_code}")
        if resp.status_code == 202:
            print("收到202状态码，使用新cookie重试...")
            retry_count += 1
            time.sleep(random.uniform(2, 5))
        elif resp.status_code != 200:
            print(f"请求失败，状态码: {resp.status_code}")
            retry_count += 1
            time.sleep(random.uniform(1, 3))
        else:
            # Got a 200 — proceed to parsing.
            break

    if retry_count >= max_retries or resp is None:
        # Original code called quit() here (followed by unreachable returns);
        # a plain return keeps the module importable and testable.
        print(f"已达到最大重试次数({max_retries})，请求失败")
        return

    soup = BeautifulSoup(resp.text, 'html.parser')
    books = []

    # Debug info: confirms we got the real rank page, not a block page.
    print(f"页面标题: {soup.title.text if soup.title else '无标题'}")
    print(f"页面编码: {resp.encoding}")

    # Each rank entry is an <li data-rid=...> element.
    book_items = soup.select('li[data-rid]')
    print(f"找到{len(book_items)}个书籍条目")

    for item in book_items:
        try:
            # The book id lives on the child <a data-bid> node.
            a_tag = item.select_one('a[data-bid]')
            book_id = a_tag.get('data-bid') if a_tag else None
            if not book_id:
                print(f"无法获取书籍ID，跳过")
                continue

            title_elem = item.select_one('.book-mid-info h2 a')
            author_elem = item.select_one('.book-mid-info .author .name')

            # Category/subcategory are the 2nd and 3rd links in the author row.
            category_elems = item.select('.book-mid-info .author a')
            category_elem = category_elems[1] if len(category_elems) > 1 else None
            subcategory_elem = category_elems[2] if len(category_elems) > 2 else None

            update_elem = item.select_one('.book-mid-info .update a')
            update_time_elem = item.select_one('.book-mid-info .update span')

            # Skip entries missing the essential fields.
            if not title_elem or not author_elem:
                print(f"跳过一个条目：找不到标题或作者")
                continue

            title = title_elem.text.strip()
            author = author_elem.text.strip()

            book_category = category_elem.text.strip() if category_elem else '未知'
            subcategory = subcategory_elem.text.strip() if subcategory_elem else ''
            full_category = f"{book_category}/{subcategory}" if subcategory else book_category

            update_chapter = update_elem.text.strip() if update_elem else ''
            update_time = update_time_elem.text.strip() if update_time_elem else ''

            # The list page's ticket number may be obfuscated (anti-scraping
            # font trick per the original notes), so the authoritative count
            # is fetched from the book's detail page. The original extracted
            # the list-page value first and immediately overwrote it — that
            # dead extraction has been removed.
            monthly_ticket = fetch_book_monthly_ticket(book_id)

            print(f"提取到书籍：{title} - {author} - {full_category} - 月票：{monthly_ticket}")

            books.append({
                '书名': title,
                '作者': author,
                '分类': full_category,
                '更新章节': update_chapter,
                '更新时间': update_time,
                '月票': monthly_ticket,
                '评分': ''
            })
        except Exception as e:
            # Best-effort per-entry parsing: log and move on.
            print(f"解析错误: {e}")
            continue

    df = pd.DataFrame(books)
    df.to_csv(DATA_PATH, index=False, encoding='utf-8-sig')
    print(f'已采集{len(df)}本书，数据保存至{DATA_PATH}')

    if len(df) == 0:
        # Nothing parsed: keep the raw HTML so the selectors can be debugged.
        print("未采集到数据，保存页面内容到debug_page.html以便调试")
        with open('debug_page.html', 'w', encoding='utf-8') as f:
            f.write(resp.text)

# 新增函数：获取书籍详情页的月票数量
def fetch_book_monthly_ticket(book_id):
    """
    从书籍详情页获取月票数量
    """
    detail_url = f"https://www.qidian.com/book/{book_id}/"
    print(f"获取书籍详情: {detail_url}")
    
    try:
        # 添加随机延时，避免频繁请求
        time.sleep(random.uniform(1, 3))
        
        resp = requests.get(detail_url, headers=get_headers(), proxies=PROXIES, timeout=10)
        
        # 处理可能的状态码问题
        if resp.status_code != 200:
            print(f"获取书籍详情失败，状态码: {resp.status_code}")
            return "获取失败"
        
        soup = BeautifulSoup(resp.text, 'html.parser')
        
        # 根据ID获取月票数量
        monthly_count_elem = soup.select_one('#monthCount')
        if monthly_count_elem:
            monthly_ticket = monthly_count_elem.text.strip()
            print(f"成功获取月票: {monthly_ticket}")
            return monthly_ticket
        else:
            # 尝试其他可能的选择器
            print("未找到月票元素，尝试其他选择器")
            # 保存详情页面以便调试
            with open(f'debug_detail_{book_id}.html', 'w', encoding='utf-8') as f:
                f.write(resp.text)
            return "未找到"
            
    except Exception as e:
        print(f"获取月票数量出错: {e}")
        return "获取出错"

# 2. 数据分析与可视化

def analyze_and_plot():
    """Analyze the scraped CSV and write charts into a dated figure directory.

    Reads DATA_PATH, derives output/figures/<date>/<category>/ from the first
    row's top-level category, and rebinds the FIGURE_DIR global so later steps
    can locate the images. Produces: category_pie.png, author_wordcloud.png,
    and monthly_ticket_bar.png (when valid ticket data exists).
    """
    today_str = get_today_str()
    df = pd.read_csv(DATA_PATH)
    # First row's top-level category names the output subdirectory.
    if '分类' in df.columns and not df.empty:
        category = df['分类'].iloc[0].split('/')[0]
    else:
        category = '未知'
    figure_dir = f'output/figures/{today_str}/{category}/'
    global FIGURE_DIR
    FIGURE_DIR = figure_dir
    os.makedirs(FIGURE_DIR, exist_ok=True)  # exist_ok avoids the check/create race

    # Category distribution pie chart
    plt.figure(figsize=(8,6))
    df['分类'].value_counts().plot.pie(autopct='%1.1f%%')
    plt.title('分类分布', fontproperties=font_prop if 'font_prop' in globals() else None, fontsize=14)
    plt.ylabel('')
    plt.savefig(FIGURE_DIR + 'category_pie.png', dpi=300, bbox_inches='tight')
    plt.close()

    # Author word cloud. WordCloud.generate() raises ValueError on empty
    # text, so guard against an empty scrape instead of crashing the run.
    author_text = ' '.join(df['作者'].astype(str)) if '作者' in df.columns else ''
    if author_text.strip():
        wc = WordCloud(font_path=font_path if os.path.exists(font_path) else 'msyh.ttc',
                      width=800, height=400, background_color='white')
        wc.generate(author_text)
        wc.to_file(FIGURE_DIR + 'author_wordcloud.png')
    else:
        print('没有作者数据，跳过词云生成')

    # Monthly-ticket TOP20 bar chart; the figure is only created when the
    # column exists (the original opened a figure unconditionally).
    if '月票' in df.columns:
        try:
            # Non-numeric ticket values (e.g. '获取失败') become NaN and are dropped.
            df['月票'] = pd.to_numeric(df['月票'], errors='coerce')
            df_valid = df.dropna(subset=['月票'])
            if not df_valid.empty:
                plt.figure(figsize=(10,6))
                top_tickets = df_valid.sort_values('月票', ascending=False).head(20)
                sns.barplot(x='月票', y='书名', data=top_tickets)
                plt.title('月票榜TOP20', fontproperties=font_prop if 'font_prop' in globals() else None, fontsize=14)
                plt.xlabel('月票数量', fontproperties=font_prop if 'font_prop' in globals() else None, fontsize=12)
                plt.tight_layout()
                plt.savefig(FIGURE_DIR + 'monthly_ticket_bar.png', dpi=300, bbox_inches='tight')
                plt.close()
                print('已生成月票榜TOP20图表')
            else:
                print('没有有效的月票数据可供分析')
        except Exception as e:
            print(f'月票数据分析出错: {e}')
    else:
        print('数据中没有月票列，跳过月票分析')

    print('图表已生成至', FIGURE_DIR)

# 3. 生成报告

def generate_report():
    """Fill templates/report.html with figure paths and write the HTML report.

    Figure paths are relative to output/ (where the report lives), pointing
    at figures/<date>/<category>/<name>.png. Rebinds the REPORT_PATH global
    to the dated, per-category report filename.

    Bug fix: the figures are saved WITHOUT a '{category}_' filename prefix
    (category_pie.png, author_wordcloud.png, monthly_ticket_bar.png), but the
    original substituted '{category}_category_pie.png' etc., so every image
    in the report was a broken link. The plain filenames are used now.
    NOTE(review): no author_bar figure is generated anymore (that chart was
    removed from the analysis step), so {{author_bar}} will still point at a
    missing file — confirm whether the template should drop that slot.
    """
    today_str = get_today_str()
    df = pd.read_csv(DATA_PATH)
    # Same category derivation as analyze_and_plot(), so paths line up.
    if '分类' in df.columns and not df.empty:
        category = df['分类'].iloc[0].split('/')[0]
    else:
        category = '未知'
    figure_dir = f'figures/{today_str}/{category}/'
    with open('templates/report.html', encoding='utf-8') as f:
        template = f.read()
    html = template.replace('{{category_pie}}', f'{figure_dir}category_pie.png') \
        .replace('{{author_bar}}', f'{figure_dir}author_bar.png') \
        .replace('{{author_wordcloud}}', f'{figure_dir}author_wordcloud.png') \
        .replace('{{wordcount_hist}}', f'{figure_dir}monthly_ticket_bar.png')
    os.makedirs('output', exist_ok=True)
    report_path = f'output/{today_str}-report-{category}.html'
    global REPORT_PATH
    REPORT_PATH = report_path
    with open(REPORT_PATH, 'w', encoding='utf-8') as f:
        f.write(html)
    print('报告已生成:', REPORT_PATH)

def _main():
    """Run the full pipeline: scrape rank data, build charts, emit the report."""
    fetch_qidian_rank()
    analyze_and_plot()
    generate_report()


if __name__ == '__main__':
    _main()