from fastapi import APIRouter, Query, HTTPException
from novelapp.common.common import returnData, setLog
from typing import Optional
import os
import pandas as pd
import sys
import requests
from bs4 import BeautifulSoup
import random
from datetime import datetime
import time
import ssl

import matplotlib.pyplot as plt
from wordcloud import WordCloud
import matplotlib
import platform
from matplotlib.font_manager import FontProperties
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor
from novelapp.models.models import BookMonthlyTicket

plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when a CJK font is active
# Module-level logger writing to qdrank.log
logger = setLog('qdrank.log')

# FastAPI router for the Qidian-rank endpoints
router = APIRouter()

# 获取当前日期字符串
def get_today_str():
    """Return today's date as a compact 'YYYYMMDD' string."""
    now = datetime.now()
    return now.strftime('%Y%m%d')

# 从URL获取分类名
def get_category_from_url(url):
    """Map a Qidian rank URL to its Chinese category name.

    Scans the known channel keys in declaration order (so the longer keys
    like 'chn21' are tested before their prefix 'chn2') and returns the
    first category whose key is a substring of *url*, or None if no key
    matches.
    """
    category_map = {
        '玄幻': 'chn21',
        '奇幻': 'chn1',
        '武侠': 'chn2',
        '仙侠': 'chn22',
        '都市': 'chn4',
        '现实': 'chn15',
        '军事': 'chn6',
        '历史': 'chn5',
        '游戏': 'chn7',
        '体育': 'chn8',
        '科幻': 'chn9',
        '诸天无限': 'chn20109',
        '悬疑': 'chn10',
        '轻小说': 'chn12',
        'VIp新作': 'chn0',
        '综合': 'yuepiao'
    }
    return next(
        (name for name, key in category_map.items() if key in url),
        None,
    )

# Browser User-Agent strings; one is chosen at random per request to reduce
# the chance of being blocked as a bot.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
]
# Optional HTTP proxy settings; empty dict means connect directly.
# NOTE(review): the commented entries point at a local proxy on port 7890.
PROXIES = {
    # 'http': 'http://127.0.0.1:7890',
    # 'https': 'http://127.0.0.1:7890',
}
# 获取请求头
def get_headers():
    """Build browser-like request headers for scraping Qidian pages.

    Rotates the User-Agent on every call; the Cookie value is a fixed
    session captured from a real browser.
    """
    chosen_ua = random.choice(USER_AGENTS)
    headers = {
        'User-Agent': chosen_ua,
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Connection': 'keep-alive',
    }
    headers['Cookie'] = 'e2=; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank_19%22%2C%22eid%22%3A%22qd_A17%22%7D; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A5%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank_19%22%2C%22eid%22%3A%22qd_C21%22%7D; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A4%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank%22%2C%22eid%22%3A%22qd_C03%22%7D; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A4%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank%22%2C%22eid%22%3A%22qd_C03%22%7D; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A16%22%7D; newstatisticUUID=1747370286_1970434159; _csrfToken=uu1Ds6uw2SRxvcxqBB2R7KDB406U6BkvNNQ85DFW; fu=1248052399; traffic_utm_referer=; supportwebp=true; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A16%22%7D; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A18%22%7D; w_tsfp=ltvuV0MF2utBvS0Q7a3qk0umHzsvdzg4h0wpEaR0f5thQLErU5mB1oV9vsP+OHHZ48xnvd7DsZoyJTLYCJI3dwMQE8/DItpF3l6VkoUjjt1FVBNjQ5jfX1NKI7wk5DNHfnhCNxS00jA8eIUd379yilkMsyN1zap3TO14fstJ019E6KDQmI5uDW3HlFWQRzaLbjcMcuqPr6g18L5a5W6N5FPzKApyC+hEhEzDh3weDClysxLoIeAPNU2qIs37SqA='
    return headers

def get_api_headers():
    """Build request headers for the Qidian mobile rank API.

    Same shape as get_headers() but carries the cookie session captured
    for m.qidian.com; the User-Agent is rotated per call.
    """
    api_cookie = '_csrfToken=ZoIz5ZC9mNqWkzkwEKK1PkX1cUpHsSFOCo62iVNB; supportwebp=true; supportWebp=true; traffic_utm_referer=; Hm_lvt_f00f67093ce2f38f215010b699629083=1746687854,1747286214,1749012128; HMACCOUNT=297B39C2FDD346FE; _gid=GA1.2.1217807451.1749012128; listStyle=2; Hm_lvt_1d7d9ab48732e057a5e22e962e5797a6=1749013789; newstatisticUUID=1749015446_1203551333; fu=1303319163; tgw_l7_route=654dfda1b430ca99e04ffc622aa0bf62; traffic_search_engine=; se_ref=; Hm_lpvt_f00f67093ce2f38f215010b699629083=1749025666; _ga_PFYW0QLV3P=GS2.1.s1749025665$o7$g0$t1749025669$j56$l0$h0; _ga_FZMMH98S83=GS2.1.s1749025665$o7$g0$t1749025669$j56$l0$h0; Hm_lpvt_1d7d9ab48732e057a5e22e962e5797a6=1749026001; _gat_gtag_UA_199934072_1=1; _ga_VMQL7235X0=GS2.1.s1749024921$o4$g1$t1749026001$j60$l0$h0; _ga=GA1.1.876310442.1746687854; _ga_D20NXNVDG2=GS2.1.s1749024921$o4$g1$t1749026001$j60$l0$h0'
    return {
        'User-Agent': random.choice(USER_AGENTS),
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Connection': 'keep-alive',
        'Cookie': api_cookie,
    }





# 分析数据并生成图表
# 修改月票数据处理部分
def analyze_and_plot(csv_path, category):
    """Analyze one day's rank CSV and render charts for a category.

    Generates three figures under output/figures/<date>/<category>/:
    a category-distribution pie, an author word cloud, and a TOP20
    monthly-ticket bar chart.

    Parameters:
        csv_path: path to the CSV produced by the crawler.
        category: Chinese category name; used in file names and titles.

    Returns:
        The absolute figure directory on success, or None when the CSV is
        empty or chart generation fails.
    """
    today_str = get_today_str()
    # Per-day, per-category output directory for the generated figures.
    figure_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), f'../../output/figures/{today_str}/{category}/'))
    if not os.path.exists(figure_dir):
        os.makedirs(figure_dir)

    logger.info(platform.system())

    # Locate a CJK-capable font file per platform. The previous version
    # assigned the font *path* to rcParams['font.sans-serif'] (which expects
    # a family name) and then wiped everything with plt.rcdefaults(); both
    # were dropped — the explicit FontProperties object below is the single
    # source of truth for fonts. It also left font_path unbound on macOS,
    # raising NameError; now there is an explicit fallback.
    font_path = None
    if platform.system() == 'Linux':
        font_path = '/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc'
        if not os.path.exists(font_path):
            font_path = '/usr/share/fonts/truetype/wqy/wqy-microhei.ttc'
    elif platform.system() == 'Windows':
        font_path = 'C:/Windows/Fonts/simhei.ttf'

    logger.info(font_path)

    if font_path and os.path.exists(font_path):
        font = FontProperties(fname=font_path)
    else:
        # Fall back to matplotlib's default font (CJK glyphs may be missing).
        font = FontProperties()
        font_path = None

    # Read CSV data
    df = pd.read_csv(csv_path)
    if df.empty:
        logger.error("CSV文件为空，无法生成图表")
        return None

    try:
        # 1. Category-distribution pie chart.
        plt.figure(figsize=(8, 6))
        sizes = df['分类'].value_counts()
        labels = sizes.index

        patches, texts, autotexts = plt.pie(
            sizes,
            labels=labels,
            autopct='%1.1f%%',
            startangle=90
        )
        plt.axis('equal')  # draw the pie as a true circle
        plt.title(f'{category}分类分布', fontproperties=font)

        # Apply the CJK font to both wedge labels and percentage texts.
        for text in texts + autotexts:
            text.set_fontproperties(font)
            text.set_size(10)

        category_pie_path = os.path.join(figure_dir, f'{category}_category_pie.png')
        plt.savefig(category_pie_path, dpi=300, bbox_inches='tight')
        plt.close()

        # 2. Author word cloud; WordCloud takes the font *file* path directly
        # (None falls back to its bundled default font).
        wc = WordCloud(
            font_path=font_path,
            width=800, height=400, background_color='white'
        )
        author_text = ' '.join(df['作者'].astype(str))
        wc.generate(author_text)
        author_wordcloud_path = os.path.join(figure_dir, f'{category}_author_wordcloud.png')
        wc.to_file(author_wordcloud_path)

        # 3. Monthly-ticket TOP20 horizontal bar chart.
        if '月票' in df.columns:
            plt.figure(figsize=(10, 6))
            # Coerce "12,345"-style strings to numbers. errors='coerce'
            # (instead of the deprecated 'ignore') guarantees a numeric dtype
            # so the sort below is numeric, never lexicographic.
            tickets = pd.to_numeric(
                df['月票'].astype(str).str.replace(',', ''), errors='coerce'
            )
            df_valid = df.assign(月票=tickets).dropna(subset=['月票'])

            if not df_valid.empty:
                top_tickets = df_valid.sort_values('月票', ascending=False).head(20)
                plt.barh(top_tickets['书名'], top_tickets['月票'])

                plt.title(f'{category}月票榜TOP20', fontproperties=font)
                plt.xlabel('月票数量', fontproperties=font)
                plt.ylabel('书名', fontproperties=font)

                # y-tick labels are book titles, so they need the CJK font too.
                for tick in plt.gca().get_yticklabels():
                    tick.set_fontproperties(font)

                plt.gca().invert_yaxis()  # rank 1 at the top
                monthly_ticket_bar_path = os.path.join(figure_dir, f'{category}_monthly_ticket_bar.png')
                plt.savefig(monthly_ticket_bar_path, dpi=300, bbox_inches='tight')
            plt.close()

        logger.info(f'图表已生成至 {figure_dir}')
        return figure_dir
    except Exception as e:
        logger.error(f"生成图表失败: {str(e)}")
        return None


# 修改原来的 fetch_book_monthly_ticket 方法
def fetch_book_monthly_ticket(book_id, title=None, author=None):
    """Fetch a book's monthly-ticket count, consulting the DB cache first.

    When both title and author are given, a cached value from
    BookMonthlyTicket is returned immediately if present; otherwise the
    Qidian detail page is scraped and the result cached. Returns the ticket
    count as a string, or a Chinese status string on failure.
    """
    # Cache lookup is only possible when both title and author are known.
    if title and author:
        cached = BookMonthlyTicket.get_monthly_ticket(title, author)
        if cached is not None:
            logger.info(f"从数据库获取月票: {cached}")
            return cached

    detail_url = f"https://www.qidian.com/book/{book_id}/"
    logger.info(f"获取书籍详情: {detail_url}")

    try:
        # Random delay so we don't hammer the site with rapid requests.
        time.sleep(random.uniform(1, 3))

        response = requests.get(detail_url, headers=get_headers(), timeout=10)
        if response.status_code != 200:
            logger.error(f"获取书籍详情失败，状态码: {response.status_code}")
            return "获取失败"

        page = BeautifulSoup(response.text, 'html.parser')

        # The ticket count lives in the element with id "monthCount".
        element = page.select_one('#monthCount')
        if element is None:
            logger.warning("未找到月票元素，尝试其他选择器")
            return "未找到"

        ticket_value = element.text.strip()
        logger.info(f"成功获取月票: {ticket_value}")

        # Persist to the cache when we can key it by title+author.
        if title and author:
            BookMonthlyTicket.set_monthly_ticket(title, author, ticket_value)

        # Always hand back a string.
        return str(ticket_value)

    except Exception as e:
        logger.error(f"获取月票数量出错: {str(e)}")
        return "获取出错"



        
@router.get("/list/")
async def get_rank_list(
    category: str = Query(None, description="排行榜分类，如'玄幻'、'奇幻'、'武侠'、'仙侠'等")
):
    """Return today's rank list plus generated chart URLs for a category.

    Crawls the data and renders the figures on demand when today's CSV or
    images are missing. Responds with returnData(code, msg, payload).
    """
    try:
        # Default to the overall ("综合") leaderboard.
        if not category:
            category = '综合'

        today_str = get_today_str()
        data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data'))
        csv_name = f'qidian_rank_{today_str}_{category}.csv'
        csv_path = os.path.join(data_dir, csv_name)

        # Crawl today's data if the CSV does not exist yet.
        if not os.path.exists(csv_path):
            logger.info(f"未找到当日{category}分类的CSV文件，开始爬取数据")
            csv_path = await async_fetch_qidian_rank(category)
            if not csv_path:
                return returnData(500, f"爬取{category}排行榜数据失败", None)

        # Generate the analysis figures if today's are missing or empty.
        figure_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), f'../../output/figures/{today_str}/{category}/'))
        if not os.path.exists(figure_dir) or not os.listdir(figure_dir):
            logger.info(f"未找到当日{category}分类的分析图片，开始生成")
            figure_dir = analyze_and_plot(csv_path, category)
            if not figure_dir:
                return returnData(500, f"生成{category}分析图表失败", None)

        df = pd.read_csv(csv_path)

        # Keep monthly-ticket values as strings to avoid JSON serialization issues.
        if '月票' in df.columns:
            df['月票'] = df['月票'].astype(str)

        # Make every cell JSON-safe: map +/-inf to sentinel strings, then turn
        # ALL missing values into None so they serialize as JSON null.
        # (The previous replace({pd.NA: None, ...}) did not reliably convert
        # the np.nan cells that read_csv actually produces.)
        df = df.replace([float('inf'), float('-inf')], ['Infinity', '-Infinity'])
        df = df.astype(object).where(pd.notna(df), None)
        # The rating column should be an empty string rather than null.
        if '评分' in df.columns:
            df['评分'] = df['评分'].fillna('')

        # All rows as plain dicts for the JSON payload.
        all_data = df.to_dict('records')

        # Build public URLs for the generated figure files.
        image_files = [f for f in os.listdir(figure_dir) if f.endswith('.png')]
        base_url = "https://harmony.airdidi.com"
        fdir = "output"  # must match the StaticFiles mount in main.py
        # NOTE(review): figures are written under output/figures/<date>/..., but
        # this URL omits the "figures" segment — confirm against the
        # StaticFiles configuration in main.py.
        image_paths = [f"{base_url}/{fdir}/{today_str}/{category}/{f}" for f in image_files]

        result = {
            "category": category,
            "total_count": len(df),
            "books": all_data,
            "images": [
                {
                    "name": os.path.basename(path),
                    "path": path
                } for path in image_paths
            ]
        }

        return returnData(200, "查询成功", result)

    except Exception as e:
        logger.error(f"获取排行榜数据失败: {str(e)}")
        return returnData(500, f"获取排行榜数据失败: {str(e)}", None)



@router.get("/asyn-csv/")
async def get_async_rank_list(
    category: str = Query(None, description="排行榜分类，如'玄幻'、'奇幻'、'武侠'、'仙侠'等")
):
    """Manually trigger the crawl for one category and report the outcome.

    Previously this handler returned implicit None (HTTP body "null"),
    giving callers no success/failure signal, and a missing category
    produced a CSV literally named '..._None.csv'. It now defaults the
    category and mirrors the /list/ response shape.
    """
    # Default to the overall leaderboard, consistent with /list/.
    if not category:
        category = '综合'

    csv_path = await async_fetch_qidian_rank(category)
    if not csv_path:
        return returnData(500, f"爬取{category}排行榜数据失败", None)
    return returnData(200, "爬取成功", {"csv_path": csv_path})

async def async_fetch_qidian_rank(category):
    """Asynchronously crawl the Qidian monthly-ticket rank API into a CSV.

    Parameters:
        category: Chinese category name; unknown names fall back to the
            overall leaderboard (catId '-1').

    Returns:
        The path of the written CSV on success, or None after all retries
        fail.
    """
    logger.warning(f"开始异步爬取起点排行榜数据，保存为csv")

    # Make sure the data directory exists.
    data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data'))
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    # Category name -> API catId; '-1' selects the overall leaderboard.
    url_map = {
        '玄幻': '21',
        '奇幻': '1',
        '武侠': '2',
        '仙侠': '22',
        '都市': '4',
        '现实': '15',
        '军事': '6',
        '历史': '5',
        '游戏': '7',
        '体育': '8',
        '科幻': '9',
        '诸天无限': '20109',
        '悬疑': '10',
        '轻小说': '12',
        '综合': '-1'
    }
    catId = url_map.get(category, '-1')

    current_year_month = datetime.now().strftime('%Y%m')

    # NOTE(review): the _csrfToken here is hard-coded and independent of the
    # one inside get_api_headers()' cookie — confirm it does not expire.
    url = f'https://m.qidian.com/majax/rank/yuepiaolist?gender=male&pageNum=1&catId={catId}&yearmonth={current_year_month}&_csrfToken=ZoIz5ZC9mNqWkzkwEKK1PkX1cUpHsSFOCo62iVNB'

    today_str = get_today_str()
    csv_name = f'qidian_rank_{today_str}_{category}.csv'
    csv_path = os.path.join(data_dir, csv_name)

    # Simple bounded retry loop with random back-off.
    max_retries = 3
    retry_count = 0

    # The endpoint fails strict certificate verification, so disable it.
    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE

    async with aiohttp.ClientSession(headers=get_api_headers()) as session:
        while retry_count < max_retries:
            try:
                async with session.get(
                    url,
                    proxy=PROXIES.get('http') if PROXIES else None,
                    # Explicit ClientTimeout; passing a bare int is deprecated in aiohttp.
                    timeout=aiohttp.ClientTimeout(total=10),
                    ssl=ssl_context
                ) as resp:
                    # Use the module logger (the old prints were inconsistent
                    # with the rest of this file and invisible in qdrank.log).
                    logger.info(f"API状态码: {resp.status}")
                    if resp.status != 200:
                        logger.error(f"请求失败，状态码: {resp.status}")
                        retry_count += 1
                        await asyncio.sleep(random.uniform(1, 3))
                        continue

                    data = await resp.json()
                    if data['code'] != 0:
                        logger.error(f"API返回错误: {data['msg']}")
                        retry_count += 1
                        await asyncio.sleep(random.uniform(1, 3))
                        continue

                    records = data['data']['records']
                    books = []
                    for record in records:
                        # "x万月票" means x * 10000 tickets; otherwise the
                        # value is a plain "<n>月票" count.
                        ticket_str = record['rankCnt']
                        if '万月票' in ticket_str:
                            ticket_num = float(ticket_str.replace('万月票', '')) * 10000
                        else:
                            ticket_num = float(ticket_str.replace('月票', ''))

                        books.append({
                            '书名': record['bName'],
                            '作者': record['bAuth'],
                            '分类': f"{record['cat']}/{record['subCat']}",
                            '字数': record['cnt'],
                            '月票': ticket_num,
                            '排名': record['rankNum']
                        })

                    # utf-8-sig so Excel opens the Chinese headers correctly.
                    df = pd.DataFrame(books)
                    df.to_csv(csv_path, index=False, encoding='utf-8-sig')
                    logger.info(f'已采集{len(df)}本书，数据保存至{csv_path}')

                    return csv_path
            except Exception as e:
                logger.error(f"API请求出错: {e}")
                retry_count += 1
                await asyncio.sleep(random.uniform(1, 3))

    logger.error(f"已达到最大重试次数({max_retries})，请求失败")
    return None