import time
import jieba
import requests
import numpy as np
from typing import List, Dict, Any, Optional, Tuple
from rank_bm25 import BM25Okapi
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from config import config

class DataProcessor:
    """Data-processing module (数据处理模块).

    Holds an in-memory document corpus, generates embeddings through a
    OneAPI-compatible ``/embeddings`` endpoint, and offers several retrieval
    strategies:

    * :meth:`hybrid_search` — BM25 + dense-vector blend,
    * :meth:`tfidf_expansion_vector_search` — TF-IDF query expansion followed
      by dense retrieval,
    * :meth:`improved_keyword_search` — BM25 with position/title/frequency
      boosts,
    * :meth:`tfidf_search` — plain TF-IDF cosine ranking.
    """

    def __init__(self) -> None:
        # Corpus-wide retrieval state, populated by
        # initialize_retrieval_models(); None/empty until then.
        self.bm25 = None                 # BM25Okapi over the full corpus
        self.all_docs: List[Dict[str, Any]] = []
        self.tokenized_corpus: List[List[str]] = []
        self.tfidf_vectorizer = None     # fitted TfidfVectorizer (full corpus)
        self.tfidf_matrix = None         # sparse TF-IDF matrix for all_docs

    # ------------------------------------------------------------------ #
    # Internal helpers
    # ------------------------------------------------------------------ #

    def _filter_by_title(self, article_title: str) -> List[Dict[str, Any]]:
        """Return the docs whose title equals *article_title* exactly.

        An empty title means "no filtering" and returns the full corpus.
        Prints a notice when the title matches nothing (callers then
        short-circuit with an empty result).
        """
        if not article_title:
            return self.all_docs
        filtered = [doc for doc in self.all_docs
                    if doc.get("title", "") == article_title]
        if not filtered:
            print(f"未找到标题为 '{article_title}' 的文档")
        return filtered

    def _tokenized_corpus_for(self, docs: List[Dict[str, Any]]) -> List[List[str]]:
        """Tokenized content for each doc, reusing precomputed tokens.

        Falls back to on-the-fly segmentation when a document lacks the
        "tokenized_content" field.
        """
        return [
            doc["tokenized_content"] if "tokenized_content" in doc
            else self.tokenize_text(doc["content"])
            for doc in docs
        ]

    @staticmethod
    def _fit_tfidf(texts: List[str]):
        """Fit a fresh TF-IDF model over *texts*; return (vectorizer, matrix)."""
        vectorizer = TfidfVectorizer(
            max_features=1000,
            stop_words=None,       # no stop-word list: corpus is Chinese
            ngram_range=(1, 2)
        )
        return vectorizer, vectorizer.fit_transform(texts)

    @staticmethod
    def _keyword_weights(content: str, title: str,
                         tokenized_query: List[str]) -> Tuple[float, float, float]:
        """Boost factors for one document: (position, title-match, term-freq).

        * position: 1.5 if any query term appears within the first 10% of
          the content, 1.2 within the first 30%, else 1.0.
        * title-match: +0.3 per query term found in the title.
        * term-freq: +0.1 * min(count, 10) per query term occurring more
          than 3 times in the content.
        """
        # Earliest offset at which any query term occurs; str.find returns
        # -1 on a miss, which the `0 <= pos` guard discards.
        first_occurrence = float('inf')
        for term in tokenized_query:
            pos = content.find(term)
            if 0 <= pos < first_occurrence:
                first_occurrence = pos

        position_weight = 1.0
        if first_occurrence < len(content) * 0.1 and len(content) > 0:
            position_weight = 1.5
        elif first_occurrence < len(content) * 0.3 and len(content) > 0:
            position_weight = 1.2

        title_match = 1.0
        for term in tokenized_query:
            if term in title:
                title_match += 0.3

        term_freq_boost = 1.0
        for term in tokenized_query:
            term_count = content.count(term)
            if term_count > 3:
                term_freq_boost += 0.1 * min(term_count, 10)

        return position_weight, title_match, term_freq_boost

    # ------------------------------------------------------------------ #
    # OneAPI access
    # ------------------------------------------------------------------ #

    def validate_oneapi_service(self) -> bool:
        """Probe the configured OneAPI endpoint with a tiny embedding request.

        Returns True iff the service answers HTTP 200 with a vector payload;
        any network/JSON error is reported and yields False (best-effort
        health check, never raises).
        """
        try:
            print(f"验证OneAPI服务: {config.ONEAPI_BASE_URL}")
            headers = {
                "Authorization": f"Bearer {config.ONEAPI_KEY}",
                "Content-Type": "application/json"
            }

            test_data = {
                "input": "测试向量",
                "model": config.EMBEDDING_MODEL
            }

            response = requests.post(
                f"{config.ONEAPI_BASE_URL}/embeddings",
                headers=headers,
                json=test_data,
                timeout=10
            )

            if response.status_code == 200:
                embedding_data = response.json()
                vector = embedding_data["data"][0]["embedding"]
                print(f"OneAPI服务验证成功，向量维度: {len(vector)}")
                return True

            print(f"OneAPI服务验证失败: HTTP {response.status_code}")
            return False

        except Exception as e:
            print(f"OneAPI验证失败: {str(e)}")
            return False

    def generate_embedding(self, text: str) -> List[float]:
        """Generate an embedding vector for *text* via OneAPI.

        Retries up to 3 times with a 1-second back-off on HTTP errors or
        network exceptions.

        Raises:
            RuntimeError: when all attempts fail.  (RuntimeError is an
            Exception subclass, so existing ``except Exception`` callers
            keep working.)
        """
        headers = {
            "Authorization": f"Bearer {config.ONEAPI_KEY}",
            "Content-Type": "application/json"
        }
        data = {
            "input": text,
            "model": config.EMBEDDING_MODEL
        }

        for attempt in range(3):
            try:
                response = requests.post(
                    f"{config.ONEAPI_BASE_URL}/embeddings",
                    headers=headers,
                    json=data,
                    timeout=15
                )
                if response.status_code == 200:
                    return response.json()["data"][0]["embedding"]
                print(f"嵌入请求失败 (尝试 {attempt + 1}/3): HTTP {response.status_code}")
                time.sleep(1)
            except requests.exceptions.RequestException as e:
                print(f"嵌入请求异常 (尝试 {attempt + 1}/3): {str(e)}")
                time.sleep(1)

        raise RuntimeError(f"无法生成嵌入向量: {text}")

    def tokenize_text(self, text: str) -> List[str]:
        """Segment Chinese text into a token list (jieba word segmentation)."""
        return list(jieba.cut(text))

    # ------------------------------------------------------------------ #
    # Corpus construction
    # ------------------------------------------------------------------ #

    def create_test_data(self) -> List[Dict[str, Any]]:
        """Build the fixed demo corpus, attaching an embedding and a
        pre-tokenized content list to every document.

        Note: this calls the embedding service once per document.
        """
        documents = [
            {
                "title": "OneAPI 配置指南",
                "content": "OneAPI 提供了兼容OpenAI的API接口，允许您使用不同的后端模型",
                "category": "技术文档"
            },
            {
                "title": "OneAPI 配置指南",
                "content": "认证使用Bearer令牌，路径为/v1/completions，参数与OpenAI官方API兼容",
                "category": "开发指南"
            },
            {
                "title": "OneAPI 配置指南",
                "content": "通过将文本转换为向量表示，可以在高维空间中计算文本之间的相似度",
                "category": "AI理论"
            },
            {
                "title": "混合检索优势",
                "content": "结合关键词检索和语义检索可以提高复杂查询的准确率",
                "category": "最佳实践"
            },
            {
                "title": "混合检索优势",
                "content": "BM25是基于词频和文档长度的经典信息检索算法",
                "category": "信息检索"
            },
            {
                "title": "混合检索优势",
                "content": "Dense Passage Retrieval使用双编码器进行深度语义匹配",
                "category": "深度学习"
            },
            {
                "title": "嵌入向量应用",
                "content": "对于长查询，使用分块处理和上下文融合可以显著提高相关性",
                "category": "性能优化"
            },
            {
                "title": "嵌入向量应用",
                "content": "中文检索需要有效的分词算法来处理无空格分隔的文本",
                "category": "自然语言处理"
            },
            {
                "title": "嵌入向量应用",
                "content": "嵌入向量可以用于语义搜索、文本分类和推荐系统等多种任务",
                "category": "应用场景"
            },
            {
                "title": "嵌入向量应用",
                "content": "通过批处理请求和缓存机制可以提高嵌入API的性能",
                "category": "性能优化"
            }
        ]

        # Precompute per-document features used by the retrieval strategies.
        for doc in documents:
            doc["embedding"] = self.generate_embedding(doc["content"])
            doc["tokenized_content"] = self.tokenize_text(doc["content"])

        return documents

    def initialize_retrieval_models(self, documents: List[Dict[str, Any]]) -> None:
        """Initialize corpus-wide BM25 and TF-IDF models over *documents*."""
        self.all_docs = documents

        # BM25 over pre-tokenized (or lazily tokenized) contents.
        self.tokenized_corpus = self._tokenized_corpus_for(documents)
        self.bm25 = BM25Okapi(self.tokenized_corpus)

        # TF-IDF over the raw content strings.
        self.tfidf_vectorizer, self.tfidf_matrix = self._fit_tfidf(
            [doc["content"] for doc in documents]
        )

        print(f"检索模型初始化完成，文档数量: {len(documents)}")

    # ------------------------------------------------------------------ #
    # Retrieval strategies
    # ------------------------------------------------------------------ #

    def hybrid_search(self, query: str, article_title: str = "",
                      k: int = None) -> List[Dict[str, Any]]:
        """Hybrid retrieval: blend BM25 and dense-vector scores.

        Args:
            query: free-text query.
            article_title: optional exact-title filter; "" searches all docs.
            k: number of results (defaults to config.TOP_K).

        Returns:
            Up to *k* document copies, each annotated with "score",
            "bm25_score" and "similarity_score"; [] on filter miss or when
            the query embedding cannot be generated.
        """
        if k is None:
            k = config.TOP_K

        print(f"\n混合检索查询: '{query}'，文章标题: '{article_title}'")
        start_time = time.time()

        filtered_docs = self._filter_by_title(article_title)
        if article_title and not filtered_docs:
            return []

        print(f"筛选后文档数量: {len(filtered_docs)}")

        # BM25 over the filtered subset (indices below refer to
        # filtered_docs, not all_docs).
        bm25_filtered = BM25Okapi(self._tokenized_corpus_for(filtered_docs))

        bm25_start = time.time()
        tokenized_query = self.tokenize_text(query)
        bm25_scores = bm25_filtered.get_scores(tokenized_query)

        bm25_indices = [idx for idx, score in enumerate(bm25_scores) if score > 0]
        if len(bm25_indices) > config.BM25_TOP_N:
            # FIX: keep the N highest-scoring candidates; the previous code
            # truncated in document order, dropping strong hits that happened
            # to appear later in the corpus.
            bm25_indices = sorted(bm25_indices,
                                  key=lambda i: bm25_scores[i],
                                  reverse=True)[:config.BM25_TOP_N]
        bm25_index_set = set(bm25_indices)  # O(1) membership checks below

        bm25_time = time.time() - bm25_start
        print(f"BM25检索找到 {len(bm25_indices)} 个相关文档")

        # Dense retrieval: a single embedding call for the query.
        vector_start = time.time()
        try:
            query_embedding = self.generate_embedding(query)
        except Exception as e:
            print(f"查询嵌入生成失败: {str(e)}")
            return []

        query_vec = np.asarray(query_embedding, dtype=float)
        query_norm = np.linalg.norm(query_vec)
        similarities = []
        for doc in filtered_docs:
            doc_vec = np.asarray(doc["embedding"], dtype=float)
            denom = query_norm * np.linalg.norm(doc_vec)
            # Guard against a zero-norm vector (the original divided blindly,
            # producing nan).
            similarities.append(float(np.dot(query_vec, doc_vec) / denom)
                                if denom else 0.0)

        # Blend the two signals.
        # NOTE(review): BM25 scores are unbounded while cosine similarity is
        # in [-1, 1]; the fixed 0.3/0.7 blend therefore skews toward BM25 on
        # high-scoring keyword matches — confirm this weighting is intended.
        combined_scores = {}
        if bm25_indices:
            for idx in bm25_indices:
                combined_scores[idx] = bm25_scores[idx] * 0.3 + similarities[idx] * 0.7
        else:
            # No keyword overlap at all: fall back to pure dense ranking.
            print("BM25无结果，仅使用向量检索")
            for idx in range(len(filtered_docs)):
                combined_scores[idx] = similarities[idx]

        sorted_indices = sorted(combined_scores,
                                key=combined_scores.get, reverse=True)

        results = []
        for idx in sorted_indices[:k]:
            doc = filtered_docs[idx].copy()
            doc["score"] = combined_scores[idx]
            doc["bm25_score"] = bm25_scores[idx] if idx in bm25_index_set else 0
            doc["similarity_score"] = similarities[idx]
            results.append(doc)

        vector_time = time.time() - vector_start
        total_time = time.time() - start_time
        print(f"混合检索完成 ({total_time:.2f}s): BM25阶段{bm25_time:.2f}s, 向量阶段{vector_time:.2f}s")
        print(f"最终返回 {len(results)} 个结果")

        return results

    def tfidf_expansion_vector_search(self, query: str, article_title: str = "",
                                      k: int = None,
                                      expansion_terms: int = 3) -> List[Dict[str, Any]]:
        """TF-IDF query expansion followed by dense retrieval.

        Stage 1 expands the query with up to *expansion_terms* salient words
        taken from the 3 TF-IDF-closest documents; stage 2 ranks every doc by
        dot product with the expanded query's embedding.

        Returns:
            Up to *k* entries of the form
            {"doc", "expanded_score", "original_score"}.
        """
        if k is None:
            k = config.TOP_K

        print(f"\nTF-IDF扩展向量检索查询: '{query}'，文章标题: '{article_title}'")
        start_time = time.time()

        filtered_docs = self._filter_by_title(article_title)
        if article_title and not filtered_docs:
            return []

        # TF-IDF fitted on the filtered subset only.
        vectorizer, tfidf_matrix = self._fit_tfidf(
            [doc["content"] for doc in filtered_docs]
        )

        # -- Stage 1: query expansion ---------------------------------- #
        tfidf_start = time.time()
        query_vec = vectorizer.transform([query])
        cosine_similarities = linear_kernel(query_vec, tfidf_matrix).flatten()

        # The 3 closest documents supply the expansion vocabulary.
        top_indices = cosine_similarities.argsort()[::-1][:3]

        try:
            feature_names = vectorizer.get_feature_names_out()
        except AttributeError:
            # sklearn < 1.0 compatibility
            feature_names = vectorizer.get_feature_names()

        expanded_terms = set()
        for idx in top_indices:
            doc_vec = tfidf_matrix[idx]
            # Highest-TF-IDF terms of this document first.
            sorted_terms = doc_vec.toarray().argsort()[0][::-1]
            for term_idx in sorted_terms[:10]:
                term = feature_names[term_idx]
                # Skip digits, single characters and terms already present
                # in the query text.
                if len(term) > 1 and not term.isdigit() and term not in query:
                    expanded_terms.add(term)
                    if len(expanded_terms) >= expansion_terms:
                        break
            if len(expanded_terms) >= expansion_terms:
                break

        expanded_query = query + " " + " ".join(expanded_terms)
        print(f"原始查询: '{query}' → 扩展查询: '{expanded_query}' (新增词汇: {', '.join(expanded_terms)})")

        tfidf_time = time.time() - tfidf_start

        # -- Stage 2: dense retrieval ---------------------------------- #
        vector_start = time.time()
        try:
            query_embedding = self.generate_embedding(expanded_query)
            # FIX: generate the original query's embedding exactly once.
            # The previous code issued one HTTP embedding request *per
            # document* inside the scoring loop, and outside the try block.
            original_embedding = self.generate_embedding(query)
        except Exception as e:
            print(f"查询嵌入生成失败: {str(e)}")
            return []

        results = []
        for doc in filtered_docs:
            doc_vector = doc["embedding"]
            # Skip docs embedded with a different model/dimension.
            if len(doc_vector) != len(query_embedding):
                continue
            results.append({
                "doc": doc,
                "expanded_score": np.dot(query_embedding, doc_vector),
                "original_score": np.dot(original_embedding, doc_vector)
            })

        # Rank by the expanded query's similarity.
        results.sort(key=lambda x: x["expanded_score"], reverse=True)
        top_results = results[:k]

        vector_time = time.time() - vector_start
        total_time = time.time() - start_time

        print(f"TF-IDF扩展向量检索完成 ({total_time:.2f}s): TF-IDF阶段{tfidf_time:.2f}s, 向量阶段{vector_time:.2f}s")

        return top_results

    def improved_keyword_search(self, query: str, article_title: str = "",
                                k: int = None) -> List[Dict[str, Any]]:
        """Enhanced keyword retrieval: BM25 with position, title and
        term-frequency boosts.

        Final score = BM25 × position_weight × title_match × term_freq_boost.

        Returns:
            Up to *k* entries of the form {"doc", "enhanced_score",
            "bm25_score", "position_weight", "title_match",
            "term_freq_boost"}.
        """
        if k is None:
            k = config.TOP_K

        print(f"\n改进关键词检索查询: '{query}'，文章标题: '{article_title}'")
        start_time = time.time()

        filtered_docs = self._filter_by_title(article_title)
        if article_title and not filtered_docs:
            return []

        # BM25 over the filtered subset.
        bm25_filtered = BM25Okapi(self._tokenized_corpus_for(filtered_docs))
        tokenized_query = self.tokenize_text(query)
        bm25_scores = bm25_filtered.get_scores(tokenized_query)

        # Compute each boost exactly once per document; the original
        # recomputed all three weights a second time when assembling the
        # top-k results.
        weights = [
            self._keyword_weights(doc["content"], doc.get("title", ""),
                                  tokenized_query)
            for doc in filtered_docs
        ]
        enhanced_scores = [
            bm25_scores[i] * pw * tm * tf
            for i, (pw, tm, tf) in enumerate(weights)
        ]

        top_indices = np.argsort(enhanced_scores)[::-1][:k]
        results = []
        for i in top_indices:
            position_weight, title_match, term_freq_boost = weights[i]
            results.append({
                "doc": filtered_docs[i],
                "enhanced_score": enhanced_scores[i],
                "bm25_score": bm25_scores[i],
                "position_weight": position_weight,
                "title_match": title_match,
                "term_freq_boost": term_freq_boost
            })

        total_time = time.time() - start_time
        print(f"增强关键词检索完成 ({total_time:.2f}s): 返回{len(results)}个结果")

        return results

    def tfidf_search(self, query: str, article_title: str = "",
                     k: int = None) -> List[Dict[str, Any]]:
        """Plain TF-IDF retrieval.

        Ranks the (optionally title-filtered) docs by cosine similarity in
        TF-IDF space; zero-similarity docs are dropped.

        Returns:
            Up to *k* document copies annotated with "tfidf_score".
        """
        if k is None:
            k = config.TOP_K

        filtered_docs = self._filter_by_title(article_title)
        if article_title and not filtered_docs:
            return []

        # TF-IDF fitted on the filtered subset only.
        vectorizer, tfidf_matrix = self._fit_tfidf(
            [doc["content"] for doc in filtered_docs]
        )

        query_vector = vectorizer.transform([query])
        cosine_similarities = linear_kernel(query_vector, tfidf_matrix).flatten()

        # Indices of the k most similar docs, best first.
        top_indices = cosine_similarities.argsort()[-k:][::-1]

        results = []
        for idx in top_indices:
            if cosine_similarities[idx] > 0:
                doc = filtered_docs[idx].copy()
                doc["tfidf_score"] = cosine_similarities[idx]
                results.append(doc)

        return results