import uvicorn
from fastapi import FastAPI, Depends, HTTPException, Request, Form, BackgroundTasks
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse, RedirectResponse, FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from typing import Optional, List
import sqlite3
import hashlib
import secrets
import datetime
import requests
import jieba
import re
import os
import time
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from dateutil import parser
# from datetime import datetime
import logging
from collections import Counter
from wordcloud import WordCloud
import html
import urllib.parse
from pyecharts.charts import Line, Timeline, Bar, Pie
from pyecharts import options as opts
from pyecharts.globals import ThemeType
import httpx
import uuid

# Configure logging: INFO level with timestamp and severity prefix.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Configure matplotlib fonts so Chinese glyphs render in generated charts.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'WenQuanYi Micro Hei', 'KaiTi']
# Keep the minus sign renderable while a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False

# Common HTTP request headers used by all crawls (desktop Chrome UA).
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate'
}

# Per-site parsing rules for common Chinese news portals, keyed by a
# substring of the site's domain.  Each rule set contains:
#   container: CSS selectors tried in order to find news-item wrappers
#   title:     selectors tried inside a container for the headline
#   content:   selectors tried inside a container for the body/summary
#   link:      callable extracting the href from an <a> tag (None-safe)
#   encoding:  default page encoding (may be overridden by detection)
SITE_RULES = {
    # Sina News
    'sina.com.cn': {
        'container': ['div.news-item', 'div.news-card', 'div.feed-card', 'div.news-box', '.news-item'],
        'title': ['h2', 'h1', 'h3', 'h4', '.title', '.tit'],
        'content': ['p', 'div.article', '.content', '.txt', '.info', '.text'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    },
    # CCTV News
    'cctv.com': {
        'container': ['.cnt_bd', '.article', '.content_list', '.list', '.box'],
        'title': ['h1', 'h2', '.tit', '.title'],
        'content': ['.content', '.cnt', '.text', '.con', 'p'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    },
    # Tencent News
    'qq.com': {
        'container': ['div.news-list', 'div.news-item', 'div.news-piece', 'article.item', '.item'],
        'title': ['h2', 'h1', 'h3', '.tit', '.title', '.header'],
        'content': ['p', 'div.content', '.desc', '.txt', '.article-content'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    },
    # NetEase News
    '163.com': {
        'container': ['div.news-item', 'div.post', 'div.news_li', 'li.news', '.news'],
        'title': ['h2', 'h1', 'h3', '.title', '.news-title', '.news_title'],
        'content': ['p', 'div.post_body', '.des', '.news-content', '.post_content'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    },
    # Sohu News
    'sohu.com': {
        'container': ['div.news-card', 'div.article', 'div.item', 'li.news', '.article'],
        'title': ['h2', 'h1', 'h3', '.title', '.tit', '.news_title'],
        'content': ['p', 'div.article-content', '.content', '.des', '.article-text'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    },
    # People's Daily Online (note: legacy GBK encoding)
    'people.com.cn': {
        'container': ['div.heading', 'div.artDet', '.text', '.box_con', '.conbox'],
        'title': ['h1', 'h2', '.tit', '.title'],
        'content': ['p', 'div.content', '.text_con', '.con', '.box_con'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'gbk'
    },
    # Generic fallback rules for unrecognized domains
    'default': {
        'container': ['article', 'div.article', 'div.post', 'div.news-item',
                      'div.card', 'div.item', 'li.news', 'section.news'],
        'title': ['h1', 'h2', 'h3', '.title', '.tit', '.headline'],
        'content': ['p', 'div.content', 'div.article-body', 'div.post-content',
                    '.text', '.desc', '.summary'],
        'link': lambda a: a.get('href') if a else None,
        'encoding': 'utf-8'
    }
}

# Sensitive-word list used for masking; a real deployment would need a
# much more complete lexicon.
SENSITIVE_WORDS = [
    "敏感词", "禁止词", "违规词", "限制词", "违法", "非法", "暴恐",
    "恐怖", "极端", "分裂", "邪教", "淫秽", "色情", "赌博",
    "诈骗", "毒品", "枪支", "爆炸", "暴力", "血腥", "反动"
]

# Custom stopwords removed during tokenization: Chinese function words,
# date/time units, and news-boilerplate terms (reporter, source, app names).
CUSTOM_STOPWORDS = [
    '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '个', '上', '也', '很', '到', '说', '要',
    '去', '你', '会', '着', '没有', '看', '好', '自己', '这', '那', '就', '又', '但', '而', '与', '及', '或', '且',
    '虽', '即',
    '因', '如', '于', '为', '对', '但', '并', '还', '更', '再', '已', '将', '等', '被', '后', '以', '之', '中', '年',
    '月', '日',
    '时', '分', '秒', '第', '版', '条', '期', '报', '网', '讯', '记者', '编辑', '来源', '客户端', 'APP', '微信', '微博',
    '公众号'
]

# Ensure the static output tree exists.  makedirs with exist_ok is both
# idempotent and race-free, unlike the original exists()-then-makedirs
# pattern, and creating "static/results" implicitly creates "static".
os.makedirs("static/results", exist_ok=True)

# Create the FastAPI application and the Jinja2 template engine.
app = FastAPI(title="资讯分析工具", description="一个强大的资讯分析平台")
templates = Jinja2Templates(directory="templates")

# Serve generated charts and other assets under /static.
app.mount("/static", StaticFiles(directory="static"), name="static")


# 数据库初始化
def create_tables():
    """Create the SQLite schema (users and crawl_tasks) if it does not exist.

    Safe to call repeatedly thanks to IF NOT EXISTS; note that ALTER is
    never issued, so columns added later will not appear in an old DB file.
    """
    conn = sqlite3.connect('news_analysis.db')
    c = conn.cursor()

    # Users table: credentials plus the API key used to authenticate requests.
    c.execute('''CREATE TABLE IF NOT EXISTS users
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                 username TEXT UNIQUE NOT NULL,
                 password_hash TEXT NOT NULL,
                 api_key TEXT UNIQUE)''')

    # Crawl-task table: one row per analysis job; the *_path columns are
    # filled in when the background task completes.
    c.execute('''CREATE TABLE IF NOT EXISTS crawl_tasks
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                 user_id INTEGER NOT NULL,
                 url TEXT NOT NULL,
                 created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                 completed_at TIMESTAMP,
                 status TEXT DEFAULT 'pending',
                 wordcloud_path TEXT,
                 word_freq_bar_path TEXT,
                 word_pie_path TEXT,
                 news_data_path TEXT,  -- 新增：新闻数据CSV文件路径
                 word_frequency_path TEXT,  -- 新增：词频数据CSV文件路径
                 FOREIGN KEY (user_id) REFERENCES users (id))''')

    conn.commit()
    conn.close()


# Ensure the schema exists at import time.
create_tables()


# 数据模型
class UserCreate(BaseModel):
    """Registration request payload for POST /api/register."""
    # Desired login name; must be unique (enforced by the users table).
    username: str
    # Plain-text password; hashed before storage.
    password: str


class NewsCrawlRequest(BaseModel):
    """Request payload for POST /api/analyze."""
    # Page URL to crawl and analyze.
    url: str
    # Caller's API key, validated against the users table.
    api_key: str


class NewsItem(BaseModel):
    """One extracted news entry (mirrors the dicts built by
    extract_news_items; not referenced by the routes visible here)."""
    title: str
    content: str
    url: str
    # Publish time as free-form text scraped from the page, if any.
    publish_time: Optional[str] = None


class TaskStatus(BaseModel):
    """Minimal shape of a crawl-task status record."""
    id: int
    # One of: 'pending', 'processing', 'completed', 'failed'.
    status: str
    created_at: datetime.datetime


# 工具函数
def get_db_connection():
    """Open a connection to the app database with dict-like row access."""
    connection = sqlite3.connect('news_analysis.db')
    # sqlite3.Row lets callers index rows by column name.
    connection.row_factory = sqlite3.Row
    return connection


def hash_password(password: str) -> str:
    """Return the hex SHA-256 digest of *password*.

    NOTE(review): unsalted SHA-256 is weak for password storage (a KDF
    like hashlib.scrypt would be better), but changing the scheme would
    invalidate every hash already stored in the users table.
    """
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()


def verify_password(password: str, password_hash: str) -> bool:
    """Check *password* against a stored SHA-256 hex digest.

    Uses a constant-time comparison (secrets.compare_digest) instead of
    ``==`` so the comparison does not leak information about the stored
    hash through timing differences.
    """
    return secrets.compare_digest(hash_password(password), password_hash)


def generate_api_key() -> str:
    """Create a new URL-safe random API key (32 bytes / 256 bits of entropy)."""
    key = secrets.token_urlsafe(32)
    return key


def get_user_by_username(username: str):
    """Fetch a user row by exact username; returns None if absent.

    The connection is closed in a ``finally`` so it is not leaked if the
    query raises (the original skipped ``close()`` on error).
    """
    conn = get_db_connection()
    try:
        return conn.execute('SELECT * FROM users WHERE username = ?', (username,)).fetchone()
    finally:
        conn.close()


def get_user_by_api_key(api_key: str):
    """Fetch a user row by API key; returns None if no key matches.

    The connection is closed in a ``finally`` so it is not leaked if the
    query raises (the original skipped ``close()`` on error).
    """
    conn = get_db_connection()
    try:
        return conn.execute('SELECT * FROM users WHERE api_key = ?', (api_key,)).fetchone()
    finally:
        conn.close()


def authenticate_user(username: str, password: str):
    """Return the user row when the credentials match, else False."""
    record = get_user_by_username(username)
    if record and verify_password(password, record['password_hash']):
        return record
    return False


def get_current_user(request: Request):
    """FastAPI dependency: resolve the calling user from the X-API-Key header.

    The original ``Depends(lambda x: x.headers.get('X-API-Key'))`` does not
    work: FastAPI inspects the lambda's signature and treats the
    un-annotated parameter as a query parameter, so the lambda receives a
    string and ``.headers`` fails at runtime.  Reading the header from the
    injected Request fixes the dependency.

    Raises:
        HTTPException: 401 when the header is missing or the key is unknown.
    """
    api_key = request.headers.get('X-API-Key')
    if not api_key:
        raise HTTPException(status_code=401, detail="认证信息缺失")
    user = get_user_by_api_key(api_key)
    if not user:
        raise HTTPException(status_code=401, detail="无效的API密钥")
    return user


# 网页内容分析函数
def filter_sensitive_words(text: str) -> str:
    """Mask every occurrence of a sensitive word with asterisks.

    Each match is replaced by '*' repeated to the word's length.  The
    original pre-checked ``word in text`` case-sensitively while
    substituting case-insensitively, so differently-cased matches (for
    any future non-Chinese entries) were skipped; the case-insensitive
    substitution alone is sufficient and is a no-op when absent.
    """
    if not text:
        return ""

    for word in SENSITIVE_WORDS:
        pattern = re.compile(re.escape(word), re.IGNORECASE)
        text = pattern.sub('*' * len(word), text)
    return text


def get_site_rules(url: str) -> dict:
    """Pick the parsing rule set whose domain key appears in *url*'s host.

    Falls back to SITE_RULES['default'] when no site key matches.
    """
    host = urllib.parse.urlparse(url).netloc
    return next(
        (rules for site, rules in SITE_RULES.items() if site in host),
        SITE_RULES['default'],
    )


def detect_encoding(content: bytes) -> str:
    """Guess the text encoding of *content* by trial decoding.

    Tries common encodings in order and returns the first that decodes
    cleanly.  'iso-8859-1' accepts any byte sequence, so it acts as the
    effective fallback; the final ``return 'utf-8'`` is unreachable in
    practice but kept for safety.

    The original wrapped the loop in a pointless bare ``try/except``
    that could never fire beyond what the inner handler already caught.
    """
    for encoding in ('utf-8', 'gbk', 'gb2312', 'iso-8859-1'):
        try:
            content.decode(encoding)
        except UnicodeDecodeError:
            continue
        return encoding
    return 'utf-8'


def fetch_web_content(url: str, max_retries=3) -> bytes:
    """Download *url* and return its body normalized to UTF-8 bytes.

    Retries up to *max_retries* times on network errors.  The encoding
    is the per-site default from SITE_RULES, overridden by byte-level
    trial detection.  Returns b'' when every attempt fails.
    """
    for attempt in range(max_retries):
        try:
            logging.info(f"正在请求 {url} (尝试 {attempt + 1}/{max_retries})...")
            response = requests.get(url, headers=HEADERS, timeout=15)
            response.raise_for_status()

            # Start from the per-site default encoding...
            site_rules = get_site_rules(url)
            encoding = site_rules.get('encoding', 'utf-8')

            # ...but prefer what trial decoding detects.
            detected_encoding = detect_encoding(response.content)
            if detected_encoding:
                encoding = detected_encoding
                logging.info(f"检测到编码: {detected_encoding}")

            # errors='replace' makes decoding total, so in practice only
            # an unknown encoding name (LookupError) can reach a fallback.
            # The original used bare ``except:`` here, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                return response.content.decode(encoding, errors='replace').encode('utf-8')
            except (LookupError, UnicodeError):
                try:
                    return response.content.decode('gbk', errors='replace').encode('utf-8')
                except (LookupError, UnicodeError):
                    return response.content.decode('utf-8', errors='replace').encode('utf-8')

        except requests.RequestException as e:
            logging.error(f"请求错误: {e}")
            time.sleep(2)  # back off before retrying

    logging.error(f"无法获取 {url} 的内容")
    return b''


def clean_text(text: str) -> str:
    """Normalize scraped text: unescape HTML entities, flatten whitespace,
    and strip control characters."""
    if not text:
        return ""

    # Decode HTML entities (&amp; -> &, etc.).
    unescaped = html.unescape(text)
    # Newlines, carriage returns and tabs become plain spaces...
    flattened = re.sub(r'[\n\r\t]', ' ', unescaped)
    # ...then runs of whitespace collapse to a single space.
    collapsed = re.sub(r'\s+', ' ', flattened).strip()
    # Finally drop any surviving C0/C1 control characters.
    return re.sub(r'[\x00-\x1F\x7F-\x9F]', '', collapsed)


def extract_news_items(html_content: bytes, base_url: str) -> list:
    """Extract news items from raw HTML using per-site selector rules.

    Args:
        html_content: raw page bytes (expected UTF-8 after fetch_web_content,
            with GBK and lenient fallbacks).
        base_url: page URL, used to pick SITE_RULES and resolve relative links.

    Returns:
        A list of dicts with keys 'title', 'content', 'url', 'publish_time'
        (all strings, possibly empty).  Containers yielding neither a title
        nor content are skipped; per-container errors are logged and ignored.
    """
    if not html_content:
        return []

    logging.info("正在解析网页内容...")

    # Decode the bytes; fall back through GBK to a lenient str() conversion.
    try:
        content_str = html_content.decode('utf-8', errors='replace')
    except:
        try:
            content_str = html_content.decode('gbk', errors='replace')
        except:
            content_str = str(html_content, errors='replace')

    # Pick the selector rules for this site.
    site_rules = get_site_rules(base_url)

    try:
        soup = BeautifulSoup(content_str, 'html.parser')
    except Exception as e:
        logging.error(f"解析HTML失败: {e}")
        return []

    # Try each site-specific container selector, accumulating all matches.
    containers = []
    for selector in site_rules['container']:
        try:
            found = soup.select(selector)
            if found:
                containers.extend(found)
                logging.info(f"使用选择器 '{selector}' 找到 {len(found)} 个容器")
        except Exception as e:
            logging.warning(f"使用选择器 '{selector}' 失败: {e}")

    # No site-specific match: fall back to very generic element selectors.
    if not containers:
        logging.info("未找到匹配的容器，尝试更通用的选择器...")
        generic_selectors = ['div', 'article', 'section', 'li', 'tr', 'ul']
        for selector in generic_selectors:
            try:
                found = soup.select(selector)
                if found:
                    containers.extend(found)
                    logging.info(f"使用通用选择器 '{selector}' 找到 {len(found)} 个容器")
            except Exception as e:
                logging.warning(f"使用通用选择器 '{selector}' 失败: {e}")

    if not containers:
        logging.warning("警告: 未找到任何新闻容器")
        return []

    news_items = []
    logging.info(f"处理 {len(containers)} 个容器...")

    for container in containers:
        try:
            # Title: first title selector that yields an element wins.
            title = ""
            for selector in site_rules['title']:
                try:
                    title_element = container.select_one(selector)
                    if title_element:
                        title = clean_text(title_element.get_text())
                        break
                except:
                    continue

            # Link: first <a> in the container, resolved against base_url.
            link = ""
            link_element = container.find('a')
            if link_element and link_element.get('href'):
                link = link_element.get('href')
                if link and not link.startswith(('http:', 'https:')):
                    link = urllib.parse.urljoin(base_url, link)

            # Content: first content selector yielding non-empty text wins.
            content = ""
            for selector in site_rules['content']:
                try:
                    content_element = container.select_one(selector)
                    if content_element:
                        content = clean_text(content_element.get_text())
                        if content:
                            break
                except:
                    continue

            # Last resort: use the anchor text as the title.
            if not title and not content and link_element:
                title = clean_text(link_element.get_text())

            # Nothing usable in this container: skip it.
            if not title and not content:
                continue

            # Publish time: prefer a <time> tag (datetime attribute or text)...
            publish_time = ""
            time_element = container.find('time')
            if time_element:
                publish_time = time_element.get('datetime', '') or clean_text(time_element.get_text())

            # ...otherwise scan elements whose class mentions time/date/pub
            # and keep the first text that looks like a date or clock time.
            if not publish_time:
                time_candidates = container.find_all(class_=re.compile(r'time|date|pub', re.I))
                for candidate in time_candidates:
                    try:
                        candidate_text = clean_text(candidate.get_text())
                        # Loose check: yyyy-mm-dd / yyyy/mm/dd or hh:mm.
                        if re.search(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}|\d{1,2}:\d{2}', candidate_text):
                            publish_time = candidate_text
                            break
                    except:
                        continue

            item = {
                "title": title,
                "content": content,
                "url": link,
                "publish_time": publish_time
            }
            news_items.append(item)

        except Exception as e:
            logging.error(f"解析容器时出错: {e}")
            continue

    logging.info(f"成功提取 {len(news_items)} 条新闻")
    return news_items


def process_text(text: str) -> list:
    """Tokenize *text* with jieba and keep only informative Chinese words.

    A token is kept when it is longer than one character, is not in
    CUSTOM_STOPWORDS, and contains at least one CJK character.
    """
    has_chinese = re.compile(r'[\u4e00-\u9fff]').search

    kept = []
    for token in jieba.cut(text):
        if len(token) > 1 and token not in CUSTOM_STOPWORDS and has_chinese(token):
            kept.append(token)
    return kept


def generate_wordcloud(text: str, output_path: str) -> None:
    """Render a word-cloud PNG of the Chinese tokens in *text*.

    Logs a warning and returns when *text* is blank; renders a
    placeholder phrase when tokenization leaves nothing usable.
    """
    if not text.strip():
        logging.warning("文本内容为空，无法生成词云")
        return

    # Tokenize and join with spaces for WordCloud's input format.
    words = process_text(text)
    text_for_wordcloud = ' '.join(words)

    wc = WordCloud(
        font_path='simhei.ttf',
        width=1000,
        height=600,
        background_color='white',
        max_words=200,
        colormap='tab20',
        max_font_size=150,
        random_state=42
    )

    if text_for_wordcloud:
        wc.generate(text_for_wordcloud)
    else:
        wc.generate("没有有效的中文内容可生成词云")

    # Ensure the target directory exists before writing — the bar-chart
    # generator already did this, but the original omitted it here.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    wc.to_file(output_path)
    logging.info(f"词云图已生成: {output_path}")


def generate_word_frequency_chart(text: str, output_path: str, top_n: int = 20) -> None:
    """Render an HTML bar chart of the top-*top_n* word frequencies in *text*."""
    if not text.strip():
        logging.warning("文本内容为空，无法生成词频分布图")
        return

    # Tokenize and count.
    tokens = process_text(text)
    most_common = Counter(tokens).most_common(top_n)

    if not most_common:
        logging.warning("没有足够的有效词汇生成词频分布图")
        return

    # Split (word, count) pairs into parallel axis lists.
    labels, frequencies = (list(seq) for seq in zip(*most_common))

    # Make sure the output directory exists.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    chart = Bar(init_opts=opts.InitOpts(theme=ThemeType.CHALK))
    chart.add_xaxis(labels)
    chart.add_yaxis("出现频次", frequencies)
    chart.set_global_opts(
        title_opts=opts.TitleOpts(title=f"标题词频TOP{top_n}"),
        xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=45)),
        datazoom_opts=opts.DataZoomOpts(orient="horizontal")
    )

    chart.render(output_path)
    logging.info(f"词频分布图已生成: {output_path}")


def generate_word_pie_chart(text: str, output_path: str, top_n: int = 10) -> None:
    """Render an HTML pie chart ("topic distribution") of the top-*top_n* words.

    Logs a warning and returns when *text* is blank or yields no tokens.
    """
    if not text.strip():
        logging.warning("文本内容为空，无法生成词频饼图")
        return

    # Tokenize and count.
    words = process_text(text)
    word_freq = Counter(words)
    top_words = word_freq.most_common(top_n)

    if not top_words:
        logging.warning("没有足够的有效词汇生成词频饼图")
        return

    # Ensure the target directory exists — the bar-chart generator does
    # this, but the original omitted it here.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    pie = (
        Pie(init_opts=opts.InitOpts(theme=ThemeType.ROMA))
        .add(
            "主题分布",
            [list(item) for item in top_words],
            radius=["30%", "60%"],
            label_opts=opts.LabelOpts(formatter="{b}: {d}%")
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title=f"标题主题分布TOP{top_n}"),
            legend_opts=opts.LegendOpts(orient="vertical", pos_top="15%", pos_left="2%")
        )
    )

    pie.render(output_path)
    logging.info(f"词频主题分布图已生成: {output_path}")


def filter_short_titles(df: pd.DataFrame, min_length: int = 8) -> pd.DataFrame:
    """Drop rows whose stripped title is shorter than *min_length* characters."""
    stripped_lengths = df['title'].astype(str).str.strip().str.len()
    return df[stripped_lengths >= min_length]


def analyze_news_task(url: str, task_id: int):
    """Background worker: crawl *url*, analyze it, and record results for *task_id*.

    Pipeline: fetch page -> extract news items (with a link-scraping
    fallback) -> save news CSV -> generate word cloud, bar chart and pie
    chart from the titles -> save word-frequency CSV -> mark the task
    'completed' with all output paths.  Any failure marks the task
    'failed'.  Runs outside the request cycle via BackgroundTasks.
    """
    conn = get_db_connection()
    try:
        # Mark the task as in progress.
        conn.execute("UPDATE crawl_tasks SET status='processing' WHERE id=?", (task_id,))
        conn.commit()

        # Download the page.
        html_content = fetch_web_content(url)
        if not html_content:
            raise Exception("无法获取网页内容")

        # Structured extraction using per-site selector rules.
        news_items = extract_news_items(html_content, url)

        if not news_items:
            # Fallback: scrape every <a> tag and treat its text as a title.
            logging.info("主解析失败，尝试备用解析...")
            try:
                site_rules = get_site_rules(url)
                encoding = site_rules.get('encoding', 'utf-8')

                try:
                    content_str = html_content.decode(encoding, errors='replace')
                except:
                    content_str = html_content.decode('utf-8', errors='replace')

                soup = BeautifulSoup(content_str, 'html.parser')

                # Collect all anchors as candidate news items.
                links = soup.find_all('a')
                news_items = []
                for link in links:
                    try:
                        title = clean_text(link.get_text())
                        href = link.get('href', '')

                        if href and not href.startswith(('http:', 'https:')):
                            href = urllib.parse.urljoin(url, href)

                        if title and len(title) > 3:  # skip very short anchor texts
                            news_items.append({
                                "title": title,
                                "content": "",
                                "url": href,
                                "publish_time": ""
                            })
                    except:
                        continue

                logging.info(f"备用解析找到 {len(news_items)} 条新闻")
            except Exception as e:
                logging.error(f"备用解析失败: {e}")

        if not news_items:
            raise Exception("未找到新闻内容")

        # Per-task output directory under static/.
        result_dir = f"static/results/{task_id}"
        os.makedirs(result_dir, exist_ok=True)

        # Persist the crawled items as CSV (with sensitive words masked).
        news_data_path = f"{result_dir}/news_data.csv"
        news_df = pd.DataFrame(news_items)
        news_df['cleaned_content'] = news_df['content'].apply(filter_sensitive_words).astype(str)
        news_df.to_csv(news_data_path, index=False, encoding='utf-8-sig')
        logging.info(f"新闻数据已保存至: {news_data_path}")

        # Analysis corpus: the titles...
        titles = news_df['title'].tolist()
        # ...padded with contents when there are too few titles.
        if len(titles) < 10:
            titles.extend(news_df['content'].tolist())

        if not titles:
            raise Exception("没有足够的数据进行分析")

        # Single text blob fed to all three chart generators.
        all_text = " ".join(titles)

        # Word cloud image.
        wordcloud_path = f"{result_dir}/wordcloud.png"
        generate_wordcloud(all_text, wordcloud_path)

        # Word-frequency bar chart (HTML).
        word_freq_bar_path = f"{result_dir}/word_freq_bar.html"
        generate_word_frequency_chart(all_text, word_freq_bar_path)

        # Topic-distribution pie chart (HTML).
        word_pie_path = f"{result_dir}/word_pie.html"
        generate_word_pie_chart(all_text, word_pie_path)

        # Full word-frequency table as CSV (utf-8-sig for Excel compatibility).
        words = process_text(all_text)
        word_freq = Counter(words)
        word_frequency_path = f"{result_dir}/word_frequency.csv"
        with open(word_frequency_path, 'w', encoding='utf-8-sig') as f:
            f.write("word,frequency\n")
            for word, freq in word_freq.most_common():
                f.write(f"{word},{freq}\n")
        logging.info(f"词频数据已保存至: {word_frequency_path}")

        # Mark the task completed and store every output path.
        conn.execute(
            "UPDATE crawl_tasks SET status='completed', completed_at=CURRENT_TIMESTAMP, "
            "wordcloud_path=?, word_freq_bar_path=?, word_pie_path=?, "
            "news_data_path=?, word_frequency_path=? WHERE id=?",
            (wordcloud_path, word_freq_bar_path, word_pie_path,
             news_data_path, word_frequency_path, task_id)
        )
        conn.commit()

        logging.info(f"分析任务 {task_id} 完成")

    except Exception as e:
        # Any failure flips the task to 'failed' so the UI can report it.
        logging.error(f"分析任务 {task_id} 失败: {e}")
        conn.execute("UPDATE crawl_tasks SET status='failed' WHERE id=?", (task_id,))
        conn.commit()
    finally:
        conn.close()


# API路由
@app.post("/api/register")
def register(user: UserCreate):
    """Register a new user and return a freshly generated API key.

    Fixes vs. original: the connection is now closed on the
    duplicate-username path (it leaked when HTTPException was raised
    before ``conn.close()``), and an insert race against a concurrent
    registration of the same username is caught via IntegrityError
    (the users table declares username UNIQUE).

    Raises:
        HTTPException: 400 when the username is already taken.
    """
    conn = get_db_connection()
    try:
        existing_user = conn.execute('SELECT * FROM users WHERE username = ?', (user.username,)).fetchone()
        if existing_user:
            raise HTTPException(status_code=400, detail="用户名已存在")

        password_hash = hash_password(user.password)
        api_key = generate_api_key()

        try:
            conn.execute(
                'INSERT INTO users (username, password_hash, api_key) VALUES (?, ?, ?)',
                (user.username, password_hash, api_key)
            )
            conn.commit()
        except sqlite3.IntegrityError:
            # Another request inserted the same username between our
            # SELECT and INSERT.
            raise HTTPException(status_code=400, detail="用户名已存在")
    finally:
        conn.close()

    return {"message": "注册成功", "api_key": api_key}


@app.post("/api/login")
def login(form_data: OAuth2PasswordRequestForm = Depends()):
    """Exchange username/password for the account's API key."""
    account = authenticate_user(form_data.username, form_data.password)
    if not account:
        raise HTTPException(status_code=400, detail="用户名或密码错误")
    return {"message": "登录成功", "api_key": account['api_key']}


@app.post("/api/analyze")
async def analyze_news(request: NewsCrawlRequest, background_tasks: BackgroundTasks):
    """Create a crawl/analysis task and schedule it in the background.

    The original INSERT used a ``RETURNING id`` clause whose result row
    was never fetched (and which requires SQLite >= 3.35); a plain
    INSERT plus ``Cursor.lastrowid`` is the documented, portable way to
    obtain the new key.  The connection is also closed in a ``finally``
    so it cannot leak if the INSERT raises.

    Raises:
        HTTPException: 401 when the API key is unknown.
    """
    user = get_user_by_api_key(request.api_key)
    if not user:
        raise HTTPException(status_code=401, detail="无效的API密钥")

    conn = get_db_connection()
    try:
        cursor = conn.execute(
            'INSERT INTO crawl_tasks (user_id, url, status) VALUES (?, ?, ?)',
            (user['id'], request.url, 'pending')
        )
        task_id = cursor.lastrowid
        conn.commit()
    finally:
        conn.close()

    # Run the crawl/analysis after the response is sent.
    background_tasks.add_task(analyze_news_task, request.url, task_id)

    return {"message": "分析任务已创建", "task_id": task_id}


@app.get("/api/tasks/{task_id}")
def get_task_status(task_id: int, request: Request):
    """Return the task row for *task_id* if it belongs to the caller.

    The original ``Depends(lambda x: x.headers.get('X-API-Key'))`` was
    broken — FastAPI treats the un-annotated lambda parameter as a query
    parameter, so ``x`` arrives as a string and ``.headers`` fails.  The
    X-API-Key header is now read from the injected Request.

    Raises:
        HTTPException: 401 for an unknown/missing key, 404 when the task
        does not exist or belongs to another user.
    """
    api_key = request.headers.get('X-API-Key')
    user = get_user_by_api_key(api_key)
    if not user:
        raise HTTPException(status_code=401, detail="无效的API密钥")

    conn = get_db_connection()
    try:
        task = conn.execute(
            'SELECT * FROM crawl_tasks WHERE id = ? AND user_id = ?',
            (task_id, user['id'])
        ).fetchone()
    finally:
        conn.close()

    if not task:
        raise HTTPException(status_code=404, detail="任务未找到")

    return dict(task)


# 网页界面路由
@app.get("/", response_class=HTMLResponse)
def index(request: Request):
    """Serve the landing page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)


@app.get("/register", response_class=HTMLResponse)
def register_page(request: Request):
    """Serve the registration form page."""
    context = {"request": request}
    return templates.TemplateResponse("register.html", context)


@app.get("/login", response_class=HTMLResponse)
def login_page(request: Request):
    """Serve the login form page."""
    context = {"request": request}
    return templates.TemplateResponse("login.html", context)


@app.get("/dashboard", response_class=HTMLResponse)
def dashboard(request: Request, api_key: Optional[str] = None):
    """Show the logged-in user's task list; redirect to /login otherwise.

    Auth is a plain ``api_key`` query parameter, matching the other
    template routes in this app.
    """
    if not api_key:
        return RedirectResponse(url="/login")

    user = get_user_by_api_key(api_key)
    if not user:
        return RedirectResponse(url="/login")

    conn = get_db_connection()
    task_rows = conn.execute(
        'SELECT * FROM crawl_tasks WHERE user_id = ? ORDER BY created_at DESC',
        (user['id'],)
    ).fetchall()
    conn.close()

    context = {
        "request": request,
        "api_key": api_key,
        "tasks": task_rows,
        "username": user['username'],
    }
    return templates.TemplateResponse("dashboard.html", context)


@app.get("/tasks/{task_id}/result", response_class=HTMLResponse)
def task_result(request: Request, task_id: int, api_key: str):
    """Render the result page (charts, news table, word frequencies) for a completed task.

    Fixes vs. original:
      * the DB connection is closed on every exit path — it leaked when
        the task was missing or not yet completed;
      * the news preview uses ``head(100)`` — the original's
        ``news_df[20:].head(100)`` silently dropped the first 20 rows,
        contradicting its own "take 100 rows" comment.
    """
    user = get_user_by_api_key(api_key)
    if not user:
        return RedirectResponse(url="/login")

    conn = get_db_connection()
    try:
        task = conn.execute(
            'SELECT * FROM crawl_tasks WHERE id = ? AND user_id = ?',
            (task_id, user['id'])
        ).fetchone()
    finally:
        conn.close()

    if not task:
        return templates.TemplateResponse("error.html", {
            "request": request,
            "message": "任务未找到"
        })

    if task['status'] != 'completed':
        return templates.TemplateResponse("error.html", {
            "request": request,
            "message": "任务尚未完成"
        })

    # Load the crawled news rows (at most 100) for display.
    news_data = []
    if task['news_data_path'] and os.path.exists(task['news_data_path']):
        try:
            news_df = pd.read_csv(task['news_data_path'])
            # Guard against NaN cells in the masked-content column.
            news_df['cleaned_content'] = news_df['cleaned_content'].fillna('').astype(str)
            news_data = news_df.head(100).to_dict('records')
        except Exception as e:
            logging.error(f"读取新闻数据CSV失败: {e}")

    # Load the top-20 word frequencies from the CSV produced by the task.
    word_frequency = []
    if task['word_frequency_path'] and os.path.exists(task['word_frequency_path']):
        try:
            word_freq_df = pd.read_csv(task['word_frequency_path'])
            word_freq_df['word'] = word_freq_df['word'].fillna('').astype(str)
            word_freq_df['frequency'] = word_freq_df['frequency'].fillna(0).astype(int)
            word_frequency = word_freq_df.head(20).to_dict('records')
        except Exception as e:
            logging.error(f"读取词频数据CSV失败: {e}")

    return templates.TemplateResponse("task_result.html", {
        "request": request,
        "api_key": api_key,
        "task": task,
        "news_data": news_data,
        "word_frequency": word_frequency,
        "username": user['username']
    })


@app.on_event("startup")
async def startup_event():
    """Prepare shared resources when the server boots."""
    # Warm up jieba so the first analysis request is not slowed down by
    # dictionary loading.
    jieba.initialize()
    # Shared async HTTP client for internal calls back into this app.
    app.state.client = httpx.AsyncClient(base_url="http://localhost:8000")


@app.on_event("shutdown")
async def shutdown_event():
    """Dispose shared resources on server shutdown."""
    client = app.state.client
    await client.aclose()


if __name__ == "__main__":
    # Run a local development server (localhost only); adjust host/port
    # for deployment.
    uvicorn.run(app, host="127.0.0.1", port=8000)