"""
微博热搜评论语义分析系统 - 后端主程序
FastAPI + PyTorch + Transformers
"""

import os
import asyncio
from datetime import datetime, timedelta
from typing import List, Dict, Any
import json
import sqlite3
from pathlib import Path

from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import uvicorn

# Initialize the FastAPI application
app = FastAPI(
    title="微博热搜评论语义分析系统",
    description="基于深度学习的微博热搜评论情感分析API",
    version="1.0.0"
)

# Configure CORS so a separately-served frontend can call this API.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# broader than the CORS spec allows for credentialed requests — consider
# pinning the concrete frontend origin(s) before production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Data models (Pydantic response schemas)
class HotTopic(BaseModel):
    """One trending (hot-search) topic, as returned by /api/hot-topics."""
    id: str
    title: str
    description: str
    hot: int  # popularity score; topics are ranked by this value descending
    comment_count: int  # number of comments recorded for this topic
    create_time: datetime

class Comment(BaseModel):
    """A single analyzed comment belonging to a topic."""
    id: str
    text: str
    sentiment: float  # continuous score in [-1, 1]; > 0 means positive
    create_time: datetime

class AnalysisResult(BaseModel):
    """Aggregate sentiment payload for one topic (/api/analysis/{topic_id})."""
    total_comments: int
    avg_sentiment: float  # mean of the per-comment sentiment scores
    sentiment_distribution: Dict[str, int]  # counts keyed "positive"/"neutral"/"negative"
    sentiment_trend: Dict[str, List]  # {"labels": [...], "values": [...]}
    keywords: List[Dict[str, Any]]  # [{"word": ..., "weight": ...}, ...]
    comments: List[Comment]  # sample of the most recent comments

# Database initialization
def init_database(db_path: str = "data/weibo_analysis.db") -> None:
    """Create the SQLite schema (and parent directory) if it does not exist.

    Args:
        db_path: Location of the SQLite file. Defaults to the project's
            ``data/weibo_analysis.db`` so existing callers are unchanged.

    Creates two tables, idempotently via IF NOT EXISTS:
      - hot_topics: one row per trending topic
      - comments:   one row per comment, foreign key to hot_topics.id
    """
    path = Path(db_path)
    # parents=True so a nested data directory is created as well
    path.parent.mkdir(parents=True, exist_ok=True)

    conn = sqlite3.connect(path)
    try:
        cursor = conn.cursor()

        # Hot-search topics table
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS hot_topics (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                description TEXT,
                hot INTEGER DEFAULT 0,
                comment_count INTEGER DEFAULT 0,
                create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        # Comments table
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS comments (
                id TEXT PRIMARY KEY,
                topic_id TEXT NOT NULL,
                text TEXT NOT NULL,
                sentiment REAL DEFAULT 0,
                keywords TEXT,
                create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (topic_id) REFERENCES hot_topics (id)
            )
        ''')

        conn.commit()
    finally:
        # Close the connection even if DDL fails (original leaked it on error)
        conn.close()

# Sentiment-analysis model
class SentimentAnalyzer:
    """Scores Chinese text sentiment on a continuous [-1, 1] scale.

    Attempts to load a BERT checkpoint; if loading fails (e.g. no network
    access), it falls back to a small keyword-based rule baseline so the
    API keeps working.
    """

    def __init__(self):
        self.model_name = "hfl/chinese-bert-wwm"
        self.tokenizer = None
        self.model = None
        self.load_model()

    def load_model(self):
        """Load the pretrained tokenizer/model; on failure leave both None."""
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            # NOTE(review): hfl/chinese-bert-wwm is a *base* checkpoint — the
            # 3-label classification head attached here is randomly
            # initialized, not fine-tuned, so its predictions are not
            # meaningful until the model is trained. TODO: swap in a
            # sentiment-fine-tuned checkpoint.
            self.model = AutoModelForSequenceClassification.from_pretrained(
                self.model_name,
                num_labels=3  # positive / neutral / negative
            )
            self.model.eval()  # inference mode (disables dropout)
            print("情感分析模型加载成功")
        except Exception as e:
            # Best effort: log and keep the service alive with the baseline.
            print(f"模型加载失败: {e}")
            self.model = None
            self.tokenizer = None

    def predict_sentiment(self, text: str) -> float:
        """Return a sentiment score in [-1, 1]; values > 0 mean positive."""
        if self.model is None or self.tokenizer is None:
            # Model unavailable — use the keyword baseline
            return self._rule_based_sentiment(text)

        # BERT path: tokenize, classify, softmax over the three classes
        inputs = self.tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            max_length=512,
            padding=True
        )

        with torch.no_grad():
            outputs = self.model(**inputs)
            predictions = torch.softmax(outputs.logits, dim=-1)

        # Collapse the 3-way distribution to a scalar: P(pos) - P(neg),
        # under the assumed label order [negative, neutral, positive]
        # — TODO confirm label mapping once a trained checkpoint is used.
        return predictions[0][2].item() - predictions[0][0].item()

    @staticmethod
    def _rule_based_sentiment(text: str) -> float:
        """Keyword-presence baseline: +0.2 per positive cue present,
        -0.2 per negative cue present, clamped to [-1, 1]."""
        positive_words = ['好', '棒', '赞', '喜欢', '支持', '优秀', '厉害']
        negative_words = ['差', '坏', '烂', '讨厌', '反对', '垃圾', '失望']

        score = 0.0
        for word in positive_words:
            if word in text:
                score += 0.2
        for word in negative_words:
            if word in text:
                score -= 0.2

        return max(-1.0, min(1.0, score))

# Keyword extraction
def extract_keywords(text: str, top_k: int = 10) -> List[str]:
    """Return up to ``top_k`` keywords from ``text``, ranked by frequency.

    Tokenizes with jieba, then keeps only multi-character tokens made
    entirely of CJK ideographs that are not common stop words.

    Args:
        text: Chinese text to analyze.
        top_k: Maximum number of keywords to return.

    Returns:
        Keywords ordered from most to least frequent.
    """
    # Local imports mirror the original: jieba is only needed here
    import jieba
    import re
    from collections import Counter

    # Tokens must consist solely of CJK ideographs (U+4E00–U+9FA5)
    cjk_only = re.compile(r'^[\u4e00-\u9fa5]+$')

    # Common Chinese function words to discard
    stop_words = frozenset({'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'})

    counts = Counter(
        word for word in jieba.cut(text)
        if len(word) > 1 and word not in stop_words and cjk_only.match(word)
    )

    # most_common is the idiomatic (and stable-for-ties) top-k selection
    return [word for word, _ in counts.most_common(top_k)]

# Module-level initialization: ensure the DB schema exists, then load the
# sentiment model (or fall back to the rule baseline). Runs at import time.
init_database()
analyzer = SentimentAnalyzer()

# Mock-data generation
def generate_mock_data(db_path: str = "data/weibo_analysis.db") -> None:
    """Seed the database with demo topics and comments (idempotent).

    Does nothing if ``hot_topics`` already contains rows. Each topic gets
    50 comments cycled from a fixed pool; sentiment and keywords are
    computed via the module-level ``analyzer`` and ``extract_keywords``.

    Args:
        db_path: SQLite file to seed; default matches ``init_database``.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Already seeded? Leave existing data untouched.
        cursor.execute("SELECT COUNT(*) FROM hot_topics")
        if cursor.fetchone()[0] > 0:
            return

        # Demo hot-search topics
        mock_topics = [
            {
                "id": "topic_001",
                "title": "双十一购物节",
                "description": "2024年双十一购物节火热进行中",
                "hot": 8765432,
                "comment_count": 15420
            },
            {
                "id": "topic_002",
                "title": "新能源汽车销量创新高",
                "description": "10月新能源汽车销量同比增长35%",
                "hot": 6543210,
                "comment_count": 8932
            },
            {
                "id": "topic_003",
                "title": "冬季旅游推荐",
                "description": "国内热门冬季旅游目的地盘点",
                "hot": 5432109,
                "comment_count": 5678
            },
            {
                "id": "topic_004",
                "title": "人工智能新进展",
                "description": "国产大模型技术取得重大突破",
                "hot": 4321098,
                "comment_count": 12456
            },
            {
                "id": "topic_005",
                "title": "健康饮食趋势",
                "description": "轻食主义成为新潮流",
                "hot": 3210987,
                "comment_count": 3421
            },
            {
                "id": "topic_006",
                "title": "电影票房破纪录",
                "description": "国庆档电影总票房创历史新高",
                "hot": 2109876,
                "comment_count": 7890
            }
        ]

        # Batch-insert topics
        cursor.executemany('''
            INSERT OR IGNORE INTO hot_topics (id, title, description, hot, comment_count)
            VALUES (?, ?, ?, ?, ?)
        ''', [
            (t["id"], t["title"], t["description"], t["hot"], t["comment_count"])
            for t in mock_topics
        ])

        # Demo comment texts, cycled for every topic
        mock_comments = [
            "这个购物节真的太棒了，优惠力度很大！",
            "感觉价格并没有便宜多少，有点失望",
            "新能源汽车确实不错，环保又省钱",
            "充电还是不太方便，希望基础设施能跟上",
            "哈尔滨的雪景太美了，值得一去",
            "人太多了，体验不太好",
            "AI技术发展真快，期待更多应用",
            "担心AI会取代人类工作",
            "轻食真的很健康，已经坚持一个月了",
            "还是喜欢重口味，轻食太清淡了"
        ]

        rows = []
        for topic in mock_topics:
            topic_id = topic["id"]
            for i in range(50):  # 50 comments per topic
                text = mock_comments[i % len(mock_comments)]
                sentiment = analyzer.predict_sentiment(text)
                keywords = json.dumps(extract_keywords(text))
                rows.append((f"comment_{topic_id}_{i}", topic_id, text, sentiment, keywords))

        cursor.executemany('''
            INSERT INTO comments (id, topic_id, text, sentiment, keywords)
            VALUES (?, ?, ?, ?, ?)
        ''', rows)

        conn.commit()
    finally:
        # Close even on failure (the original leaked the connection on error)
        conn.close()

# Seed demo data at import time (no-op when the DB is already populated)
generate_mock_data()

# API routes
@app.get("/api/hot-topics", response_model=List[HotTopic])
async def get_hot_topics():
    """Return the top 50 hot-search topics, most popular first."""
    conn = sqlite3.connect("data/weibo_analysis.db")
    conn.row_factory = sqlite3.Row  # access columns by name
    try:
        cursor = conn.cursor()
        cursor.execute('''
            SELECT * FROM hot_topics 
            ORDER BY hot DESC 
            LIMIT 50
        ''')

        # Build the response; the finally block guarantees the connection
        # is closed even if a row fails to parse (original leaked it).
        return [
            HotTopic(
                id=row["id"],
                title=row["title"],
                description=row["description"],
                hot=row["hot"],
                comment_count=row["comment_count"],
                create_time=datetime.fromisoformat(row["create_time"]),
            )
            for row in cursor.fetchall()
        ]
    finally:
        conn.close()

@app.get("/api/analysis/{topic_id}", response_model=AnalysisResult)
async def get_analysis(
    topic_id: str,
    time_range: str = Query("24h", regex="^(1h|24h|7d)$")
):
    """Return the sentiment analysis for one topic within a time window.

    Args:
        topic_id: Primary key in hot_topics; 404 when unknown.
        time_range: One of "1h", "24h", "7d" — how far back to include
            comments. The original computed this window but never applied
            it; it is now enforced in the SQL query.

    Raises:
        HTTPException: 404 when the topic does not exist.
    """
    conn = sqlite3.connect("data/weibo_analysis.db")
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.cursor()

        # The topic must exist
        cursor.execute("SELECT * FROM hot_topics WHERE id = ?", (topic_id,))
        if not cursor.fetchone():
            raise HTTPException(status_code=404, detail="话题不存在")

        # SQLite-side cutoff: datetime('now') is UTC, which matches the UTC
        # CURRENT_TIMESTAMP default stamped on comments at insert time.
        time_modifier = {
            "1h": "-1 hours",
            "24h": "-1 days",
            "7d": "-7 days"
        }[time_range]

        cursor.execute('''
            SELECT * FROM comments 
            WHERE topic_id = ? 
              AND create_time >= datetime('now', ?)
            ORDER BY create_time DESC
        ''', (topic_id, time_modifier))

        comments = []
        sentiment_scores = []
        sentiment_distribution = {"positive": 0, "neutral": 0, "negative": 0}
        keyword_counts = {}

        for row in cursor.fetchall():
            sentiment = row["sentiment"]
            comments.append(Comment(
                id=row["id"],
                text=row["text"],
                sentiment=sentiment,
                create_time=datetime.fromisoformat(row["create_time"])
            ))
            sentiment_scores.append(sentiment)

            # Bucket scores with a ±0.1 neutral dead zone
            if sentiment > 0.1:
                sentiment_distribution["positive"] += 1
            elif sentiment < -0.1:
                sentiment_distribution["negative"] += 1
            else:
                sentiment_distribution["neutral"] += 1

            # Keywords are already on this row — the original re-queried the
            # DB once per comment (N+1) for a column it had just fetched.
            for keyword in json.loads(row["keywords"] or "[]"):
                keyword_counts[keyword] = keyword_counts.get(keyword, 0) + 1

        # Weight = fraction of comments in the window mentioning the keyword
        keywords = [
            {"word": word, "weight": count / len(comments) if comments else 0}
            for word, count in keyword_counts.items()
        ]
        keywords.sort(key=lambda x: x["weight"], reverse=True)

        # Mock hourly trend: mean sentiment plus small random jitter
        avg_sentiment = sum(sentiment_scores) / len(sentiment_scores) if sentiment_scores else 0
        trend_labels = [f"{i:02d}:00" for i in range(24)]
        trend_values = [avg_sentiment + (torch.randn(1).item() * 0.1) for _ in range(24)]

        return AnalysisResult(
            total_comments=len(comments),
            avg_sentiment=avg_sentiment,
            sentiment_distribution=sentiment_distribution,
            sentiment_trend={"labels": trend_labels, "values": trend_values},
            keywords=keywords[:20],
            comments=comments[:10]
        )
    finally:
        # Close on every path — the original leaked the connection on 404
        conn.close()

@app.get("/api/export/{topic_id}")
async def export_data(topic_id: str):
    """Export the analysis data for a topic.

    Placeholder endpoint — the export pipeline is not implemented yet.
    """
    # TODO: produce a real CSV/Excel export of comments and statistics
    return {"message": "导出功能开发中"}

# Health check
@app.get("/health")
async def health_check():
    """Liveness probe: report service status and the current server time."""
    now = datetime.now()
    return {"status": "healthy", "timestamp": now}

# Run the development server (auto-reload) when executed directly
if __name__ == "__main__":
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=8000,
        reload=True
    )
