import numpy as np
from typing import List, Dict, Tuple
from datetime import datetime
from collections import Counter, defaultdict
import spacy
import openai
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from .config import AppConfig
from .arxiv_client import ArxivPaper

# Shared spaCy pipeline, loaded once at import time (callers disable the
# parser/NER components per-call, so only tokenization/lemmatization is used).
nlp = spacy.load("en_core_web_sm")

class TrendAnalyzer:
    """Cluster arXiv paper abstracts and summarize per-cluster topic trends.

    Pipeline: spaCy preprocessing of abstracts -> OpenAI embeddings ->
    K-means clustering -> per-cluster top keywords, monthly paper counts,
    and the papers closest to each cluster centroid.
    """

    def __init__(self, config: AppConfig):
        self.config = config
        # NOTE(review): mutates global openai module state — assumes one
        # API key per process.
        openai.api_key = config.openai_api_key
        # Bounds for the K-means cluster count chosen in _cluster_embeddings.
        self.min_clusters = 3
        self.max_clusters = 8

    async def analyze_trends(self, papers: List[ArxivPaper]) -> Dict:
        """Run the full trend-analysis pipeline over *papers*.

        Returns a dict with:
          - "clusters": one entry per cluster (id, keywords, trend,
            representative_papers)
          - "statistics": corpus-level totals

        Returns an empty report for an empty input instead of crashing
        inside clustering/statistics.
        """
        if not papers:
            # Guard: KMeans and the statistics below require >= 1 paper.
            return {
                "clusters": [],
                "statistics": {"total_papers": 0, "time_periods": 0},
            }

        # Text preprocessing.
        clean_texts = self._preprocess_texts([p.abstract for p in papers])

        # Text embeddings.
        embeddings = await self._get_embeddings(clean_texts)

        # Clustering.
        cluster_labels = self._cluster_embeddings(embeddings)

        # Keyword extraction per cluster.
        keywords = self._extract_keywords(clean_texts, cluster_labels)

        # Time-based trend per cluster.
        time_trends = self._analyze_time_trends(papers, cluster_labels)

        return {
            "clusters": [
                {
                    # Cast numpy integer label to plain int (JSON-friendly).
                    "id": int(label),
                    "keywords": keywords[label],
                    "trend": time_trends[label],
                    "representative_papers": self._get_representative_papers(
                        papers, cluster_labels, label, embeddings
                    ),
                }
                for label in sorted(keywords)
            ],
            "statistics": {
                "total_papers": len(papers),
                # Number of distinct publication months in the corpus
                # (equals len(periods) of any cluster's trend).
                "time_periods": len({p.published.strftime("%Y-%m") for p in papers}),
            },
        }

    def _preprocess_texts(self, texts: List[str]) -> List[str]:
        """Lowercase, lemmatize, and drop stop-words/non-alphabetic tokens.

        Returns one space-joined token string per input text, in order.
        """
        processed = []
        # parser/ner disabled: only tagging/lemmatization is needed here.
        for doc in nlp.pipe(texts, disable=["parser", "ner"]):
            tokens = [
                token.lemma_.lower()
                for token in doc
                if not token.is_stop and token.is_alpha
            ]
            processed.append(" ".join(tokens))
        return processed

    async def _get_embeddings(self, texts: List[str]) -> np.ndarray:
        """Fetch OpenAI embeddings for *texts*.

        Batches requests (the embeddings endpoint caps the number of
        inputs per call) and re-sorts each response by its "index" field
        so the returned (n_texts, dim) array matches input order.
        """
        batch_size = 1000
        vectors: List[List[float]] = []
        for start in range(0, len(texts), batch_size):
            response = await openai.Embedding.acreate(
                input=texts[start:start + batch_size],
                model="text-embedding-ada-002",
            )
            # The API does not guarantee response ordering; "index" does.
            items = sorted(response["data"], key=lambda item: item["index"])
            vectors.extend(item["embedding"] for item in items)
        return np.array(vectors)

    def _cluster_embeddings(self, embeddings: np.ndarray) -> np.ndarray:
        """K-means cluster the embeddings; returns one label per row.

        The cluster count scales as ~1 cluster per 10 papers, clamped to
        [min_clusters, max_clusters] and never above the sample count
        (KMeans raises if n_clusters > n_samples).
        """
        n_clusters = min(
            max(self.min_clusters, len(embeddings) // 10),
            self.max_clusters,
            len(embeddings),
        )
        # n_init pinned explicitly: its default changed across
        # scikit-learn versions; this keeps results reproducible.
        kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
        return kmeans.fit_predict(embeddings)

    def _extract_keywords(self, texts: List[str], labels: np.ndarray) -> Dict[int, List[str]]:
        """Return the 5 most frequent words for each cluster.

        Fixed: the previous version returned (word, count) tuples, which
        contradicted the declared Dict[int, List[str]] return type; now
        only the words are returned, most frequent first.
        """
        cluster_words: Dict[int, List[str]] = defaultdict(list)
        for text, label in zip(texts, labels):
            cluster_words[label].extend(text.split())

        return {
            label: [word for word, _ in Counter(words).most_common(5)]
            for label, words in cluster_words.items()
        }

    def _analyze_time_trends(self, papers: List[ArxivPaper], labels: np.ndarray) -> Dict[int, Dict]:
        """Count papers per cluster per publication month ("YYYY-MM").

        Returns {label: {"periods": [...months...], "counts": [...]}} with
        periods sorted ascending and counts aligned to them.
        """
        time_points = sorted({p.published.strftime("%Y-%m") for p in papers})
        # Precomputed month -> position map: O(1) lookup instead of the
        # O(n) list.index() per paper.
        month_index = {month: i for i, month in enumerate(time_points)}
        trends: Dict[int, Dict] = defaultdict(
            # Each cluster gets its OWN periods list; the old code shared
            # one list object across all clusters, so mutating one trend's
            # periods would have corrupted every other.
            lambda: {"periods": list(time_points), "counts": [0] * len(time_points)}
        )

        for paper, label in zip(papers, labels):
            time_key = paper.published.strftime("%Y-%m")
            trends[label]["counts"][month_index[time_key]] += 1

        # Return a plain dict: a defaultdict would silently create empty
        # entries on any later missing-key lookup.
        return dict(trends)

    def _get_representative_papers(self, papers: List[ArxivPaper],
                                   labels: np.ndarray,
                                   cluster_id: int,
                                   embeddings: np.ndarray) -> List[Dict]:
        """Return up to 3 papers closest (cosine) to the cluster centroid.

        Each entry carries the paper id, title, and its similarity to the
        centroid; empty list if the cluster has no members.
        """
        cluster_indices = np.where(labels == cluster_id)[0]
        if len(cluster_indices) == 0:
            return []

        cluster_embeddings = embeddings[cluster_indices]
        centroid = cluster_embeddings.mean(axis=0)

        # Similarity of every cluster member to the centroid
        # (cosine_similarity expects 2-D inputs, hence the reshape).
        similarities = cosine_similarity(
            centroid.reshape(1, -1),
            cluster_embeddings,
        )[0]

        # Indices (within the cluster) of the top-3 similarities, descending.
        top_indices = similarities.argsort()[-3:][::-1]
        return [
            {
                "id": papers[cluster_indices[i]].id,
                "title": papers[cluster_indices[i]].title,
                "similarity": float(similarities[i]),
            }
            for i in top_indices
        ]