# -*- coding: utf-8 -*-
"""
Rerank检索器
基于重排序的检索方法
"""

import json
import os
from typing import Dict, List, Any, Tuple, Optional
from loguru import logger
import numpy as np
from collections import defaultdict
import re

from .base_retriever import BaseRetriever, RetrievalResult
from .vector_retriever import VectorRetriever
from .keyword_retriever import KeywordRetriever
from ..ollama_client import ollama_client


class RerankRetriever(BaseRetriever):
    """Rerank检索器"""
    
    def __init__(self, config: Dict[str, Any] = None):
        """Initialize the rerank retriever.

        Args:
            config: Optional configuration dictionary; missing keys fall
                back to the defaults declared below.
        """
        super().__init__("RerankRetriever", config)

        # Tunable parameters, each read from the merged config with a default.
        # rerank_method is one of: ollama_rerank, llm_scoring,
        # cross_encoder, listwise.
        defaults = {
            "initial_retrieval_size": 20,
            "final_top_k": 5,
            "rerank_method": "ollama_rerank",
            "use_multiple_retrievers": True,
            "min_confidence": 0.5,
            "cache_path": "data/rerank_cache.json",
        }
        for key, fallback in defaults.items():
            setattr(self, key, self.config.get(key, fallback))

        # First-stage retrievers are built lazily in initialize().
        self.vector_retriever = None
        self.keyword_retriever = None

        # Relative weights used by the feature-weighted rerank strategy.
        self.feature_weights = {
            'semantic_similarity': 0.3,
            'keyword_match': 0.2,
            'answer_quality': 0.25,
            'relevance_score': 0.15,
            'length_penalty': 0.1,
        }

        # In-memory rerank cache, persisted to cache_path by _save_cache().
        self.rerank_cache = {}

        logger.info("🔄 Rerank检索器初始化完成")
    
    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """Build the first-stage retrievers and index the training data.

        Args:
            data: Training records; each item should carry 'question',
                'answer' and optionally 'id'.

        Returns:
            bool: True on success, False on any failure.
        """
        try:
            logger.info(f"📊 开始初始化Rerank检索器，数据量: {len(data)}")

            # Restore any previously persisted rerank cache.
            self._load_cache()

            if self.use_multiple_retrievers:
                # Build both first-stage retrievers from optional sub-configs.
                self.vector_retriever = VectorRetriever(
                    self.config.get("vector_config", {
                        "embedding_method": "llm",
                        "similarity_threshold": 0.3
                    })
                )
                self.keyword_retriever = KeywordRetriever(
                    self.config.get("keyword_config", {
                        "match_type": "fuzzy",
                        "min_score": 0.3
                    })
                )

                # Either retriever failing to initialize aborts the whole setup.
                if not self.vector_retriever.initialize(data):
                    logger.error("❌ 向量检索器初始化失败")
                    return False
                if not self.keyword_retriever.initialize(data):
                    logger.error("❌ 关键词检索器初始化失败")
                    return False

            # Keep raw records plus an id -> record lookup for reranking.
            self.data = data
            self.doc_index = {str(item.get('id', i)): item for i, item in enumerate(data)}

            self.is_initialized = True
            logger.info("✅ Rerank检索器初始化成功")
            return True

        except Exception as e:
            logger.error(f"❌ Rerank检索器初始化异常: {e}")
            return False
    
    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """Run the two-stage retrieve-then-rerank pipeline.

        Args:
            question: Query question.
            top_k: Number of reranked documents to keep.

        Returns:
            RetrievalResult: Final answer plus rerank metadata. An empty
            result (confidence 0.0) is returned when the retriever is not
            initialized or the pipeline raises.
        """
        if not self.is_initialized:
            logger.error("❌ Rerank检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)

        try:
            # Wrapped so _measure_time can report wall-clock latency.
            def _retrieve_internal():
                # 1. First-stage candidate retrieval.
                initial_candidates = self._initial_retrieval(question)

                # 2. Feature extraction for each candidate.
                candidates_with_features = self._extract_features(question, initial_candidates)

                # 3. Rerank with the configured strategy.
                reranked_candidates = self._rerank_candidates(question, candidates_with_features)

                # 4. Keep only the best top_k candidates.
                final_results = reranked_candidates[:top_k]

                # 5. Produce the final answer from the survivors.
                answer, confidence = self._generate_final_answer(question, final_results)

                # BUGFIX: surface the initial candidate count explicitly.
                # The old code probed `'initial_candidates' in locals()` in
                # the OUTER frame, where the name never exists (it is local
                # to this nested function), so metadata always reported 0.
                return answer, confidence, final_results, len(initial_candidates)

            result_data, response_time = self._measure_time(_retrieve_internal)
            answer, confidence, final_results, initial_count = result_data

            # Only a non-empty answer counts as a success.
            if answer:
                self.success_count += 1

            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=final_results,
                metadata={
                    'method': 'rerank',
                    'rerank_method': self.rerank_method,
                    'initial_candidates': initial_count,
                    'final_results': len(final_results)
                }
            )

        except Exception as e:
            logger.error(f"❌ Rerank检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)
    
    def _initial_retrieval(self, question: str) -> List[Dict[str, Any]]:
        """First-stage retrieval producing the raw candidate pool.

        Args:
            question: Query question.

        Returns:
            List[Dict[str, Any]]: Candidate documents, each tagged with
            'retrieval_method' and 'initial_score'.
        """
        if self.use_multiple_retrievers and self.vector_retriever and self.keyword_retriever:
            half = self.initial_retrieval_size // 2
            candidates = []

            # Pull from both retrievers, tagging every hit with its origin.
            for method, retriever in (('vector', self.vector_retriever),
                                      ('keyword', self.keyword_retriever)):
                result = retriever.retrieve(question, half)
                docs = result.retrieved_docs
                if docs:
                    for doc in docs:
                        doc['retrieval_method'] = method
                        doc['initial_score'] = doc.get('score', 0.0)
                    candidates.extend(docs)

            # Deduplicate by document id, keeping the first occurrence
            # (vector hits come first and therefore win ties).
            seen_ids = set()
            deduped = []
            for doc in candidates:
                doc_id = doc.get('id')
                if doc_id in seen_ids:
                    continue
                seen_ids.add(doc_id)
                deduped.append(doc)
            candidates = deduped
        else:
            # No base retrievers available: plain lexical overlap fallback.
            candidates = self._simple_text_matching(question, self.initial_retrieval_size)

        logger.info(f"🔍 初始检索获得 {len(candidates)} 个候选结果")
        return candidates
    
    def _simple_text_matching(self, question: str, top_k: int) -> List[Dict[str, Any]]:
        """
        简单文本匹配
        
        Args:
            question: 查询问题
            top_k: 返回数量
        
        Returns:
            List[Dict[str, Any]]: 匹配结果
        """
        candidates = []
        question_words = set(question.lower().split())
        
        for item in self.data:
            # 计算词汇重叠度
            item_text = f"{item['question']} {item.get('answer', '')}"
            item_words = set(item_text.lower().split())
            
            overlap = len(question_words.intersection(item_words))
            total_words = len(question_words.union(item_words))
            
            if total_words > 0:
                score = overlap / total_words
                
                if score > 0.1:  # 最小阈值
                    candidates.append({
                        'id': item.get('id'),
                        'question': item['question'],
                        'answer': item.get('answer', ''),
                        'score': score,
                        'retrieval_method': 'text_matching',
                        'initial_score': score
                    })
        
        # 排序并返回top_k
        candidates.sort(key=lambda x: x['score'], reverse=True)
        return candidates[:top_k]
    
    def _extract_features(self, question: str, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Attach the rerank feature vector to every candidate.

        Args:
            question: Query question.
            candidates: Candidate documents from the first stage.

        Returns:
            List[Dict[str, Any]]: The same list, each candidate mutated in
            place with a 'features' dict.
        """
        logger.info(f"🔧 开始提取 {len(candidates)} 个候选结果的特征")

        for candidate in candidates:
            cand_question = candidate.get('question', '')
            cand_answer = candidate.get('answer', '')

            candidate['features'] = {
                # LLM-judged similarity between the two questions.
                'semantic_similarity': self._calculate_semantic_similarity(question, cand_question),
                # Fraction of query keywords found in the candidate text.
                'keyword_match': self._calculate_keyword_match(question, cand_question, cand_answer),
                # Heuristic quality score of the stored answer.
                'answer_quality': self._calculate_answer_quality(cand_answer),
                # LLM-judged question/answer relevance.
                'relevance_score': self._calculate_relevance_score(question, candidate),
                # Preference for answers of moderate length.
                'length_penalty': self._calculate_length_penalty(cand_answer),
                # Score carried over from the first-stage retriever.
                'initial_score': candidate.get('initial_score', 0.0),
            }

        return candidates
    
    def _calculate_semantic_similarity(self, question: str, candidate_question: str) -> float:
        """Estimate the semantic similarity of two questions via the LLM.

        Falls back to Jaccard word overlap when the LLM reply contains no
        number, and to 0.0 on any exception.

        Args:
            question: Query question.
            candidate_question: Candidate question.

        Returns:
            float: Similarity score; LLM scores are clamped to [0, 1].
        """
        try:
            # Ask the LLM for a single 0-1 similarity score.
            prompt = f"""
请评估以下两个问题的语义相似度，返回0-1之间的分数：

问题1: {question}
问题2: {candidate_question}

请只返回数字分数，不要其他内容。
"""

            response = ollama_client.generate_text(prompt)

            # Take the first number in the reply, clamped to [0, 1].
            # (Module-level `re` import; the old local re-import was redundant.)
            numbers = re.findall(r'\d+\.?\d*', response)
            if numbers:
                return min(max(float(numbers[0]), 0.0), 1.0)

            # Fallback: Jaccard overlap of the lowercased word sets.
            words_a = set(question.lower().split())
            words_b = set(candidate_question.lower().split())
            if not words_a or not words_b:
                return 0.0
            return len(words_a & words_b) / len(words_a | words_b)

        except Exception as e:
            logger.warning(f"⚠️ 语义相似度计算失败: {e}")
            return 0.0
    
    def _calculate_keyword_match(self, question: str, candidate_question: str, candidate_answer: str) -> float:
        """
        计算关键词匹配度
        
        Args:
            question: 查询问题
            candidate_question: 候选问题
            candidate_answer: 候选答案
        
        Returns:
            float: 匹配度分数
        """
        # 提取关键词
        question_keywords = self._extract_keywords(question)
        candidate_text = f"{candidate_question} {candidate_answer}"
        
        if not question_keywords:
            return 0.0
        
        match_count = 0
        for keyword in question_keywords:
            if keyword.lower() in candidate_text.lower():
                match_count += 1
        
        return match_count / len(question_keywords)
    
    def _extract_keywords(self, text: str) -> List[str]:
        """
        提取关键词
        
        Args:
            text: 输入文本
        
        Returns:
            List[str]: 关键词列表
        """
        # 简单的关键词提取
        words = text.split()
        
        # 过滤停用词
        stop_words = {'的', '是', '在', '有', '和', '与', '或', '但', '如果', '因为', '所以', '什么', '怎么', '为什么', '哪里', '谁', '如何'}
        keywords = [word for word in words if word not in stop_words and len(word) > 1]
        
        return keywords
    
    def _calculate_answer_quality(self, answer: str) -> float:
        """
        计算答案质量
        
        Args:
            answer: 答案文本
        
        Returns:
            float: 质量分数
        """
        if not answer:
            return 0.0
        
        score = 0.0
        
        # 长度因子（适中的长度更好）
        length = len(answer)
        if 50 <= length <= 500:
            score += 0.3
        elif 20 <= length <= 1000:
            score += 0.2
        else:
            score += 0.1
        
        # 结构化程度
        if '。' in answer or '！' in answer or '？' in answer:
            score += 0.2
        
        # 包含数字或具体信息
        if re.search(r'\d+', answer):
            score += 0.1
        
        # 包含专业术语
        professional_terms = ['系统', '方法', '技术', '原理', '过程', '结果', '分析', '研究']
        for term in professional_terms:
            if term in answer:
                score += 0.05
                break
        
        # 避免过于简单的答案
        if len(answer.split()) < 3:
            score *= 0.5
        
        return min(score, 1.0)
    
    def _calculate_relevance_score(self, question: str, candidate: Dict[str, Any]) -> float:
        """LLM-judged relevance between the query and a candidate pair.

        Falls back to the candidate's first-stage score when the LLM reply
        contains no number or an exception occurs.

        Args:
            question: Query question.
            candidate: Candidate document.

        Returns:
            float: Relevance score; LLM scores are clamped to [0, 1].
        """
        try:
            prompt = f"""
请评估以下问题和答案的相关性，返回0-1之间的分数：

用户问题: {question}
候选问题: {candidate.get('question', '')}
候选答案: {candidate.get('answer', '')}

请只返回数字分数，不要其他内容。
"""

            response = ollama_client.generate_text(prompt)

            # Use the first number in the reply, clamped to [0, 1].
            numbers = re.findall(r'\d+\.?\d*', response)
            if numbers:
                return min(max(float(numbers[0]), 0.0), 1.0)

            # No number came back: keep the first-stage score.
            return candidate.get('initial_score', 0.0)

        except Exception as e:
            logger.warning(f"⚠️ 相关性分数计算失败: {e}")
            return candidate.get('initial_score', 0.0)
    
    def _calculate_length_penalty(self, answer: str) -> float:
        """
        计算长度惩罚
        
        Args:
            answer: 答案文本
        
        Returns:
            float: 长度惩罚分数（越高越好）
        """
        if not answer:
            return 0.0
        
        length = len(answer)
        
        # 理想长度范围
        if 100 <= length <= 300:
            return 1.0
        elif 50 <= length <= 500:
            return 0.8
        elif 20 <= length <= 1000:
            return 0.6
        else:
            return 0.3
    
    def _rerank_candidates(self, question: str, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Dispatch to the configured rerank strategy.

        Args:
            question: Query question.
            candidates: Candidates with extracted features.

        Returns:
            List[Dict[str, Any]]: Candidates sorted by rerank score.
        """
        logger.info(f"🔄 开始重排序 {len(candidates)} 个候选结果")

        method = self.rerank_method
        if method == "ollama_rerank":
            return self._ollama_rerank(question, candidates)
        if method == "llm_scoring":
            return self._llm_scoring_rerank(question, candidates)
        if method == "listwise":
            return self._listwise_rerank(question, candidates)
        # "feature_weighted" and any unrecognized method both use the
        # feature-weighted strategy.
        return self._feature_weighted_rerank(candidates)
    
    def _feature_weighted_rerank(self, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        基于特征加权的重排序
        
        Args:
            candidates: 候选结果
        
        Returns:
            List[Dict[str, Any]]: 重排序后的结果
        """
        for candidate in candidates:
            features = candidate.get('features', {})
            
            # 计算加权分数
            weighted_score = 0.0
            for feature_name, weight in self.feature_weights.items():
                feature_value = features.get(feature_name, 0.0)
                weighted_score += feature_value * weight
            
            candidate['rerank_score'] = weighted_score
        
        # 排序
        candidates.sort(key=lambda x: x.get('rerank_score', 0.0), reverse=True)
        return candidates
    
    def _ollama_rerank(self, question: str, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Rerank candidates with the Ollama client's rerank endpoint.

        Falls back to per-candidate LLM scoring when the client lacks a
        `rerank_documents` method or the rerank call fails.

        Args:
            question: Query question.
            candidates: Candidate documents.

        Returns:
            List[Dict[str, Any]]: Candidate copies sorted by rerank relevance.
        """
        try:
            logger.info(f"🤖 使用Ollama rerank重排序 {len(candidates)} 个候选结果")

            if not hasattr(ollama_client, 'rerank_documents'):
                # Client without rerank support: score each pair individually.
                return self._llm_scoring_rerank(question, candidates)

            # Rerank over each candidate's concatenated question+answer text.
            documents = [
                f"{c.get('question', '')} {c.get('answer', '')}" for c in candidates
            ]
            rerank_results = ollama_client.rerank_documents(
                query=question,
                documents=documents,
                top_k=len(candidates)
            )

            # Rebuild the list in the order Ollama returned.
            reranked = []
            for doc_idx, relevance_score in rerank_results:
                if doc_idx < len(candidates):
                    entry = candidates[doc_idx].copy()
                    entry['rerank_score'] = relevance_score
                    reranked.append(entry)

            # Append anything Ollama skipped, with a low default score.
            covered = {idx for idx, _ in rerank_results}
            for i, candidate in enumerate(candidates):
                if i not in covered:
                    entry = candidate.copy()
                    entry['rerank_score'] = 0.1
                    reranked.append(entry)

            logger.info("✅ Ollama rerank重排序完成")
            return reranked

        except Exception as e:
            logger.error(f"❌ Ollama rerank重排序失败: {e}")
            # Rerank failed entirely: fall back to LLM scoring.
            return self._llm_scoring_rerank(question, candidates)
    
    def _llm_scoring_rerank(self, question: str, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Score each candidate individually with the LLM, then sort.

        Args:
            question: Query question.
            candidates: Candidate documents.

        Returns:
            List[Dict[str, Any]]: The same list, sorted descending by the
            'rerank_score' added in place.
        """
        for candidate in candidates:
            try:
                prompt = f"""
请对以下问答对的相关性和质量进行评分（0-10分）：

用户问题: {question}
候选问题: {candidate.get('question', '')}
候选答案: {candidate.get('answer', '')}

评分标准：
- 相关性：问题是否匹配
- 准确性：答案是否正确
- 完整性：答案是否完整
- 清晰度：答案是否清晰

请只返回数字分数（0-10），不要其他内容。
"""

                response = ollama_client.generate_text(prompt)

                # Normalize the first 0-10 number to [0, 1]; keep the
                # first-stage score when no number comes back.
                numbers = re.findall(r'\d+\.?\d*', response)
                if numbers:
                    candidate['rerank_score'] = min(max(float(numbers[0]) / 10.0, 0.0), 1.0)
                else:
                    candidate['rerank_score'] = candidate.get('initial_score', 0.0)

            except Exception as e:
                logger.warning(f"⚠️ LLM评分失败: {e}")
                candidate['rerank_score'] = candidate.get('initial_score', 0.0)

        candidates.sort(key=lambda c: c.get('rerank_score', 0.0), reverse=True)
        return candidates
    
    def _listwise_rerank(self, question: str, candidates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Ask the LLM to order the candidate list in a single prompt.

        Only the first ten candidates are shown to keep the prompt short;
        candidates the LLM does not rank are appended with a low score.
        Falls back to feature-weighted reranking when parsing fails or an
        exception is raised.

        Args:
            question: Query question.
            candidates: Candidate documents.

        Returns:
            List[Dict[str, Any]]: Candidates in LLM-preferred order.
        """
        try:
            # Present at most ten numbered question/answer snippets.
            candidate_list = []
            for i, candidate in enumerate(candidates[:10]):
                candidate_list.append(f"{i+1}. 问题: {candidate.get('question', '')}\n   答案: {candidate.get('answer', '')[:100]}...")

            candidates_text = "\n\n".join(candidate_list)

            prompt = f"""
请对以下候选问答对按照与用户问题的相关性进行排序：

用户问题: {question}

候选问答对：
{candidates_text}

请返回排序后的编号列表，用逗号分隔，例如：3,1,5,2,4
"""

            response = ollama_client.generate_text(prompt)

            # Parse a comma-separated 1-based ranking like "3,1,5,2,4".
            order_match = re.search(r'([\d,\s]+)', response)
            if order_match:
                order_indices = [
                    int(tok.strip()) - 1
                    for tok in order_match.group(1).split(',')
                    if tok.strip().isdigit()
                ]

                ordered = []
                used = set()

                # Ranked candidates get decreasing scores: 1.0, 0.9, 0.8, ...
                for idx in order_indices:
                    if 0 <= idx < len(candidates) and idx not in used:
                        candidates[idx]['rerank_score'] = 1.0 - (len(ordered) * 0.1)
                        ordered.append(candidates[idx])
                        used.add(idx)

                # Unranked candidates trail with a flat low score.
                for i, candidate in enumerate(candidates):
                    if i not in used:
                        candidate['rerank_score'] = 0.1
                        ordered.append(candidate)

                return ordered

        except Exception as e:
            logger.warning(f"⚠️ 列表式重排序失败: {e}")

        # Fallback when no ranking could be parsed or an error occurred.
        return self._feature_weighted_rerank(candidates)
    
    def _generate_final_answer(self, question: str, final_results: List[Dict[str, Any]]) -> Tuple[str, float]:
        """Build the answer returned to the caller.

        Uses the top-ranked candidate's stored answer; when its rerank
        score is below min_confidence, asks the LLM for an enhanced answer
        grounded in the top three candidates.

        Args:
            question: Query question.
            final_results: Reranked candidates, best first.

        Returns:
            Tuple[str, float]: (answer, confidence).
        """
        if not final_results:
            # No candidates survived: let the LLM answer unassisted.
            return ollama_client.answer_question(question), 0.3

        best = final_results[0]
        answer = best.get('answer', '')
        confidence = best.get('rerank_score', 0.0)

        if confidence < self.min_confidence:
            # Low confidence: give the LLM the top answers as context.
            context = "\n".join(
                r['answer'] for r in final_results[:3] if r.get('answer')
            )
            enhanced_answer = ollama_client.answer_question(question, context)

            # Only adopt the enhanced answer when it actually adds content.
            if enhanced_answer and len(enhanced_answer) > len(answer):
                answer = enhanced_answer
                confidence = min(confidence + 0.2, 1.0)

        return answer, confidence
    
    def _load_cache(self) -> None:
        """
        加载缓存
        """
        try:
            if os.path.exists(self.cache_path):
                with open(self.cache_path, 'r', encoding='utf-8') as f:
                    self.rerank_cache = json.load(f)
                logger.info(f"📥 加载Rerank缓存: {len(self.rerank_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 加载Rerank缓存失败: {e}")
            self.rerank_cache = {}
    
    def _save_cache(self) -> None:
        """Persist the rerank cache to cache_path as UTF-8 JSON.

        Failures are logged as warnings and never raised.
        """
        try:
            # BUGFIX: os.path.dirname() returns '' for a bare filename
            # (e.g. cache_path="rerank_cache.json"), and os.makedirs('')
            # raises, silently dropping the save. Only create the directory
            # when the path actually contains one.
            cache_dir = os.path.dirname(self.cache_path)
            if cache_dir:
                os.makedirs(cache_dir, exist_ok=True)
            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(self.rerank_cache, f, ensure_ascii=False, indent=2)
            logger.info(f"💾 保存Rerank缓存: {len(self.rerank_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 保存Rerank缓存失败: {e}")


if __name__ == "__main__":
    # Manual smoke test for the rerank retriever.
    demo_config = {
        "initial_retrieval_size": 20,
        "final_top_k": 5,
        "rerank_method": "ollama_rerank",
        "use_multiple_retrievers": True,
        "min_confidence": 0.5
    }

    demo_retriever = RerankRetriever(demo_config)

    # Two tiny AI question/answer records to index.
    sample_data = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": "AI"
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": "AI"
        }
    ]

    if demo_retriever.initialize(sample_data):
        # Run a single query and dump the result plus performance counters.
        result = demo_retriever.retrieve("机器学习是什么？")
        print(f"检索结果: {result.to_dict()}")

        stats = demo_retriever.get_performance_stats()
        print(f"性能统计: {stats}")