# -*- coding: utf-8 -*-
"""
关键词检索器
基于关键词匹配的检索方法
"""

import json
import math
import os
import re
from collections import defaultdict, Counter
from typing import Dict, List, Any, Set, Tuple

from loguru import logger

from .base_retriever import BaseRetriever, RetrievalResult
from ..ollama_client import ollama_client


class KeywordRetriever(BaseRetriever):
    """Keyword-matching retriever.

    Builds an inverted index (keyword -> document IDs) over Q/A items,
    weights matches by IDF, optionally fuzzy-matches keywords via edit
    distance, and falls back to the LLM client when matches are weak or
    absent. The index can be persisted to / restored from a JSON cache.
    """

    def __init__(self, config: Dict[str, Any] = None):
        """
        Initialize the keyword retriever.

        Args:
            config: Optional configuration dict overriding the defaults below.
        """
        super().__init__("KeywordRetriever", config)

        # Configuration parameters
        self.match_threshold = self.config.get("match_threshold", 0.3)      # min doc score to accept
        self.use_fuzzy_match = self.config.get("use_fuzzy_match", True)     # enable edit-distance matching
        self.keyword_weight = self.config.get("keyword_weight", 1.0)        # base weight per keyword hit
        self.exact_match_bonus = self.config.get("exact_match_bonus", 0.5)  # extra score for exact hits
        self.cache_path = self.config.get("cache_path", "data/keyword_cache.json")

        # Index data structures
        self.keyword_index = defaultdict(list)  # keyword -> list of document IDs
        self.document_keywords = {}             # document ID -> keyword list
        self.documents = {}                     # document ID -> document record
        self.idf_scores = {}                    # keyword -> IDF score

        # Chinese stop words filtered out during keyword extraction
        self.stop_words = {
            '的', '是', '在', '有', '和', '与', '或', '但', '如果', '那么', 
            '什么', '怎么', '为什么', '如何', '哪里', '哪个', '多少', '几个',
            '一个', '这个', '那个', '这些', '那些', '我们', '你们', '他们',
            '可以', '能够', '应该', '需要', '必须', '会', '要', '想', '让'
        }

        logger.info("🔍 关键词检索器初始化完成")

    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Build (or load from cache) the keyword index over the given data.

        Args:
            data: Training items; each needs a 'question' and usually an 'answer'.

        Returns:
            bool: True when initialization succeeded.
        """
        try:
            logger.info(f"📊 开始初始化关键词检索器，数据量: {len(data)}")

            # Try the on-disk cache first to skip re-indexing.
            # NOTE(review): the cache is not validated against *data*; a stale
            # cache persists until the file is deleted — confirm intended.
            if self._load_cache():
                logger.info("✅ 从缓存加载关键词索引")
                self.is_initialized = True
                return True

            # Build the inverted keyword index from scratch.
            self._build_keyword_index(data)

            # Compute per-keyword IDF scores.
            self._calculate_idf_scores()

            # Persist for future runs.
            self._save_cache()

            self.is_initialized = True
            logger.info("✅ 关键词检索器初始化成功")
            return True

        except Exception as e:
            logger.error(f"❌ 关键词检索器初始化异常: {e}")
            return False

    def _build_keyword_index(self, data: List[Dict[str, Any]]) -> None:
        """
        Build the inverted keyword index and per-document keyword lists.

        Args:
            data: Training items.
        """
        logger.info("🔧 构建关键词索引...")

        for item in data:
            # Fall back to a positional ID when the item carries none.
            doc_id = item.get('id', len(self.documents))

            # Store the document record.
            self.documents[doc_id] = {
                'id': doc_id,
                'question': item['question'],
                'answer': item.get('answer', ''),
                'category': item.get('category'),
                'difficulty': item.get('difficulty', 'medium'),
                'metadata': item
            }

            # Extract keywords from the combined question + answer text.
            text = f"{item['question']} {item.get('answer', '')}"
            keywords = self._extract_keywords(text)

            # Merge in any pre-defined keywords supplied with the item.
            if 'keywords' in item and item['keywords']:
                keywords.extend(item['keywords'])

            # Deduplicate and drop single-character terms. Sorted so the
            # index (and the JSON cache built from it) is deterministic;
            # raw set order varies between interpreter runs.
            keywords = sorted({kw for kw in keywords if kw and len(kw) > 1})

            # Store per-document keywords.
            self.document_keywords[doc_id] = keywords

            # Populate the inverted index.
            for keyword in keywords:
                self.keyword_index[keyword].append(doc_id)

        logger.info(f"✅ 关键词索引构建完成，索引了 {len(self.keyword_index)} 个关键词")

    def _extract_keywords(self, text: str) -> List[str]:
        """
        Extract candidate keywords from free text.

        Args:
            text: Input text.

        Returns:
            List[str]: Keyword list (may contain duplicates; caller dedupes).
        """
        keywords = []

        # 1. Regex-based tokenization.
        # Chinese terms of 2-4 characters.
        chinese_words = re.findall(r'[\u4e00-\u9fff]{2,4}', text)
        keywords.extend(chinese_words)

        # English words, lowercased.
        english_words = re.findall(r'\b[a-zA-Z]{2,}\b', text.lower())
        keywords.extend(english_words)

        # Digit runs.
        numbers = re.findall(r'\d+', text)
        keywords.extend(numbers)

        # 2. Punctuation-delimited phrases (2-10 chars).
        phrases = re.split(r'[，。！？；：、\s]+', text)
        for phrase in phrases:
            if 2 <= len(phrase) <= 10:
                keywords.append(phrase.strip())

        # 3. Drop stop words and one-character tokens.
        filtered_keywords = []
        for kw in keywords:
            kw = kw.strip()
            if kw and len(kw) > 1 and kw not in self.stop_words:
                filtered_keywords.append(kw)

        return filtered_keywords

    def _calculate_idf_scores(self) -> None:
        """
        Compute an IDF score for every indexed keyword.
        """
        logger.info("📊 计算IDF分数...")

        total_docs = len(self.documents)

        for keyword, doc_list in self.keyword_index.items():
            doc_freq = len(set(doc_list))  # number of documents containing the keyword
            # +1 in the denominator avoids division by zero; math.log
            # replaces the scalar np.log, removing the numpy dependency.
            idf = math.log(total_docs / (doc_freq + 1))
            self.idf_scores[keyword] = idf

        logger.info(f"✅ IDF分数计算完成，处理了 {len(self.idf_scores)} 个关键词")

    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Run keyword retrieval for a question.

        Args:
            question: Query text.
            top_k: Maximum number of documents to return.

        Returns:
            RetrievalResult: Answer, confidence, timing, and retrieved docs.
        """
        if not self.is_initialized:
            logger.error("❌ 关键词检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)

        try:
            # Extract the query keywords once; previously they were
            # re-extracted a second time just to fill in the metadata.
            query_keywords = self._extract_keywords(question)

            # Timed inner retrieval.
            def _retrieve_internal():
                if not query_keywords:
                    # No usable keywords: let the LLM answer directly.
                    answer = ollama_client.answer_question(question)
                    return answer, 0.3, []

                # Score all candidate documents.
                doc_scores = self._calculate_document_scores(query_keywords)

                # Rank by score and keep the top-k.
                sorted_docs = sorted(doc_scores.items(), key=lambda x: x[1], reverse=True)
                top_docs = sorted_docs[:top_k]

                # Drop documents below the acceptance threshold.
                filtered_docs = [(doc_id, score) for doc_id, score in top_docs
                                 if score >= self.match_threshold]

                if not filtered_docs and top_docs:
                    # Nothing passed the threshold: keep the single best hit.
                    filtered_docs = [top_docs[0]]

                # Materialize the retrieved documents.
                retrieved_docs = []
                for doc_id, score in filtered_docs:
                    doc_data = self.documents[doc_id]
                    retrieved_docs.append({
                        'id': doc_id,
                        'question': doc_data['question'],
                        'answer': doc_data['answer'],
                        'score': score,
                        'matched_keywords': self._get_matched_keywords(query_keywords, doc_id),
                        'metadata': doc_data['metadata']
                    })

                # Produce the answer.
                if retrieved_docs:
                    best_doc = retrieved_docs[0]
                    answer = best_doc['answer']
                    confidence = min(best_doc['score'], 1.0)

                    # Weak best match: ask the LLM to refine, using the top
                    # documents' answers as context.
                    if confidence < 0.6:
                        context = "\n".join([doc['answer'] for doc in retrieved_docs[:3]])
                        llm_answer = ollama_client.answer_question(question, context)
                        if llm_answer and len(llm_answer) > len(answer):
                            answer = llm_answer
                            confidence = min(confidence + 0.2, 1.0)
                else:
                    answer = ollama_client.answer_question(question)
                    confidence = 0.3
                    retrieved_docs = []

                return answer, confidence, retrieved_docs

            result_data, response_time = self._measure_time(_retrieve_internal)
            answer, confidence, retrieved_docs = result_data

            # Count any non-empty answer as a success.
            if answer:
                self.success_count += 1

            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=retrieved_docs,
                metadata={
                    'method': 'keyword',
                    'num_retrieved': len(retrieved_docs),
                    'query_keywords': query_keywords
                }
            )

        except Exception as e:
            logger.error(f"❌ 关键词检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)

    def _calculate_document_scores(self, query_keywords: List[str]) -> Dict[int, float]:
        """
        Score documents against the query keywords.

        Args:
            query_keywords: Keywords extracted from the query.

        Returns:
            Dict[int, float]: Document ID -> accumulated score.
        """
        doc_scores = defaultdict(float)

        for query_kw in query_keywords:
            # Exact match: IDF-weighted score plus a fixed bonus.
            if query_kw in self.keyword_index:
                idf_score = self.idf_scores.get(query_kw, 1.0)
                for doc_id in self.keyword_index[query_kw]:
                    doc_scores[doc_id] += self.keyword_weight * idf_score + self.exact_match_bonus

            # Fuzzy match (optional). Identical keywords are skipped here:
            # they already scored above, and fuzzily re-scoring them
            # (similarity 1.0 > 0.7) double-counted every exact hit.
            if self.use_fuzzy_match:
                for indexed_kw in self.keyword_index.keys():
                    if indexed_kw == query_kw:
                        continue
                    similarity = self._calculate_keyword_similarity(query_kw, indexed_kw)
                    if similarity > 0.7:  # similarity threshold
                        idf_score = self.idf_scores.get(indexed_kw, 1.0)
                        for doc_id in self.keyword_index[indexed_kw]:
                            doc_scores[doc_id] += self.keyword_weight * idf_score * similarity

        return dict(doc_scores)

    def _calculate_keyword_similarity(self, kw1: str, kw2: str) -> float:
        """
        Compute the similarity of two keywords.

        Exact equality -> 1.0; substring containment -> 0.8; otherwise a
        normalized edit-distance similarity.

        Args:
            kw1: First keyword.
            kw2: Second keyword.

        Returns:
            float: Similarity score in [0, 1].
        """
        if kw1 == kw2:
            return 1.0

        # Containment counts as a strong partial match.
        if kw1 in kw2 or kw2 in kw1:
            return 0.8

        max_len = max(len(kw1), len(kw2))
        if max_len == 0:
            return 0.0

        distance = self._edit_distance(kw1, kw2)
        similarity = 1 - distance / max_len

        return max(0.0, similarity)

    @staticmethod
    def _edit_distance(s1: str, s2: str) -> int:
        """Levenshtein distance via dynamic programming.

        Hoisted out of _calculate_keyword_similarity so the helper is not
        redefined on every call (that method runs in a hot fuzzy-match loop).
        """
        m, n = len(s1), len(s2)
        dp = [[0] * (n + 1) for _ in range(m + 1)]

        for i in range(m + 1):
            dp[i][0] = i
        for j in range(n + 1):
            dp[0][j] = j

        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if s1[i - 1] == s2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1

        return dp[m][n]

    def _get_matched_keywords(self, query_keywords: List[str], doc_id: int) -> List[str]:
        """
        List the query keywords that match a given document.

        Args:
            query_keywords: Query keywords.
            doc_id: Document ID.

        Returns:
            List[str]: Matched keywords; fuzzy matches are rendered as
            "query~indexed".
        """
        doc_keywords = set(self.document_keywords.get(doc_id, []))
        matched = []

        for query_kw in query_keywords:
            if query_kw in doc_keywords:
                matched.append(query_kw)
            elif self.use_fuzzy_match:
                # Look for a similar document keyword (first hit wins).
                for doc_kw in doc_keywords:
                    if self._calculate_keyword_similarity(query_kw, doc_kw) > 0.7:
                        matched.append(f"{query_kw}~{doc_kw}")
                        break

        return matched

    def _load_cache(self) -> bool:
        """
        Load the cached keyword index from disk.

        Returns:
            bool: True when the cache existed and was loaded.
        """
        try:
            if os.path.exists(self.cache_path):
                with open(self.cache_path, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)

                # JSON keys are strings; document IDs are stored back as ints.
                self.keyword_index = defaultdict(list, cache_data.get('keyword_index', {}))
                self.document_keywords = {int(k): v for k, v in cache_data.get('document_keywords', {}).items()}
                self.documents = {int(k): v for k, v in cache_data.get('documents', {}).items()}
                self.idf_scores = cache_data.get('idf_scores', {})

                return True

            return False

        except Exception as e:
            logger.warning(f"⚠️ 加载关键词缓存失败: {e}")
            return False

    def _save_cache(self) -> None:
        """
        Persist the keyword index to the JSON cache file.
        """
        try:
            os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)

            # Stringify int keys so the structure is JSON-serializable.
            cache_data = {
                'keyword_index': dict(self.keyword_index),
                'document_keywords': {str(k): v for k, v in self.document_keywords.items()},
                'documents': {str(k): v for k, v in self.documents.items()},
                'idf_scores': self.idf_scores
            }

            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)

            logger.info(f"💾 关键词缓存已保存到 {self.cache_path}")

        except Exception as e:
            logger.warning(f"⚠️ 保存关键词缓存失败: {e}")


# numpy is optional; when absent, fall back to a minimal stand-in that
# provides only the scalar log() used for IDF computation.
try:
    import numpy as np
except ImportError:
    import math

    logger.warning("⚠️ numpy未安装，IDF计算将使用简化版本")

    class np:
        """Minimal numpy stand-in exposing only log()."""
        log = staticmethod(math.log)


if __name__ == "__main__":
    # 测试关键词检索器
    config = {
        "match_threshold": 0.3,
        "use_fuzzy_match": True,
        "keyword_weight": 1.0
    }
    
    retriever = KeywordRetriever(config)
    
    # 测试数据
    test_data = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": "AI",
            "keywords": ["机器学习", "人工智能", "算法"]
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": "AI",
            "keywords": ["深度学习", "神经网络", "学习"]
        }
    ]
    
    # 初始化
    if retriever.initialize(test_data):
        # 测试检索
        result = retriever.retrieve("机器学习是什么？")
        print(f"检索结果: {result.to_dict()}")
        
        # 性能统计
        stats = retriever.get_performance_stats()
        print(f"性能统计: {stats}")