import json
import os
import re
import time
import pickle
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple, Optional, Any, Union
from collections import OrderedDict
import logging
import asyncio
from concurrent.futures import ThreadPoolExecutor
import heapq

# Third-party library imports; fail fast with an install hint if any is missing.
try:
    from sentence_transformers import SentenceTransformer
    import faiss
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
    import jieba
    import jieba.analyse
except ImportError as e:
    print(f"请安装所需库: pip install sentence-transformers faiss-cpu scikit-learn jieba")
    raise e

# Logging configuration: module-level logger shared by all components below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class LRUCache:
    """A small least-recently-used cache with hit/miss statistics."""

    def __init__(self, maxsize: int = 1000):
        # OrderedDict preserves insertion order; the front entry is the LRU one.
        self.cache = OrderedDict()
        self.maxsize = maxsize
        self.hits = 0
        self.misses = 0

    def get(self, key: str) -> Any:
        """Return the cached value for *key*, or None on a miss."""
        try:
            value = self.cache[key]
        except KeyError:
            self.misses += 1
            return None
        # Refresh recency so this entry is evicted last.
        self.cache.move_to_end(key)
        self.hits += 1
        return value

    def put(self, key: str, value: Any):
        """Insert or overwrite *key*, evicting the oldest entry when over capacity."""
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        while len(self.cache) > self.maxsize:
            # last=False pops from the front, i.e. the least recently used.
            self.cache.popitem(last=False)

    def hit_rate(self) -> float:
        """Fraction of lookups that hit the cache (0.0 when never queried)."""
        total = self.hits + self.misses
        return self.hits / total if total else 0.0


class SemanticRetriever:
    """Semantic retrieval component (no result filtering).

    Uses a Sentence-BERT model plus a prebuilt FAISS index when both are
    available; otherwise falls back to a TF-IDF retriever built directly
    from the JSONL knowledge file.
    """

    def __init__(self, data_dir: str = None):
        # Default matches the project's on-disk data layout.
        self.data_dir = data_dir or r"D:\sjysds\pythonProject1\data"
        self.model = None          # SentenceTransformer, or None if unavailable
        self.index = None          # FAISS index, or None if unavailable
        self.unit_ids = []         # row position in the index -> unit_id
        self.unit_contents = {}    # unit_id -> core content text
        self._initialize()

    def _initialize(self):
        """Load the embedding model, FAISS index and knowledge texts.

        Any failure in the semantic pipeline downgrades to the TF-IDF fallback.
        """
        try:
            # Load (or download and cache) the sentence embedding model.
            model_path = os.path.join(self.data_dir, "models", "all-MiniLM-L6-v2")
            os.makedirs(os.path.dirname(model_path), exist_ok=True)

            try:
                self.model = SentenceTransformer(model_path)
            except Exception:
                # No (or broken) local copy: download once and cache it.
                self.model = SentenceTransformer('all-MiniLM-L6-v2')
                self.model.save(model_path)

            # Prebuilt index artifacts; all three must exist to use FAISS.
            index_path = os.path.join(self.data_dir, "semantic_index.faiss")
            vectors_path = os.path.join(self.data_dir, "knowledge_vectors.npy")
            unit_ids_path = os.path.join(self.data_dir, "semantic_index_unit_ids.pkl")

            if os.path.exists(index_path) and os.path.exists(vectors_path) and os.path.exists(unit_ids_path):
                self.index = faiss.read_index(index_path)
                # Context manager closes the pickle file deterministically
                # (the original `pickle.load(open(...))` leaked the handle).
                with open(unit_ids_path, 'rb') as f:
                    self.unit_ids = pickle.load(f)
                logger.info("语义索引加载成功")
            else:
                logger.warning("语义索引文件不存在，需要先构建索引")

            # Texts are needed by both the FAISS and the TF-IDF path.
            self._load_knowledge_contents()

        except Exception as e:
            logger.warning(f"语义检索器初始化失败: {e}, 使用TF-IDF备选")
            self._initialize_tfidf()

    def _initialize_tfidf(self):
        """Build the TF-IDF fallback retriever from the knowledge file."""
        try:
            knowledge_file = os.path.join(self.data_dir, "knowledge_units.jsonl")
            if not os.path.exists(knowledge_file):
                logger.warning(f"知识库文件不存在: {knowledge_file}")
                return

            knowledge_units = []
            with open(knowledge_file, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        knowledge_units.append(json.loads(line.strip()))
                    except json.JSONDecodeError:
                        # Skip malformed lines rather than abort the build.
                        continue

            self.unit_ids = [unit['unit_id'] for unit in knowledge_units]
            knowledge_contents = [unit['core_content'] for unit in knowledge_units]

            # Fit TF-IDF over all unit texts (deliberately no stop-word filter).
            self.vectorizer = TfidfVectorizer(max_features=5000, stop_words=None)
            self.tfidf_vectors = self.vectorizer.fit_transform(knowledge_contents)

            for unit in knowledge_units:
                self.unit_contents[unit['unit_id']] = unit['core_content']

            logger.info("TF-IDF检索器初始化完成")

        except Exception as e:
            logger.error(f"TF-IDF检索器初始化失败: {e}")

    def _load_knowledge_contents(self):
        """Populate unit_contents from the JSONL knowledge file (best effort)."""
        try:
            knowledge_file = os.path.join(self.data_dir, "knowledge_units.jsonl")
            if not os.path.exists(knowledge_file):
                return

            with open(knowledge_file, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        unit = json.loads(line.strip())
                        self.unit_contents[unit['unit_id']] = unit['core_content']
                    except (json.JSONDecodeError, KeyError):
                        # Tolerate malformed or incomplete lines.
                        continue
        except Exception as e:
            logger.warning(f"加载知识内容失败: {e}")

    def retrieve(self, query: str, top_k: int = 3) -> List[Dict]:
        """Return up to *top_k* knowledge units for *query* (completely unfiltered).

        Uses FAISS when the model and index loaded, TF-IDF otherwise, and
        returns an empty list when no backend is available or retrieval fails.
        """
        try:
            if self.model and self.index:
                # Sentence-BERT + FAISS path.
                query_vector = self.model.encode([query], convert_to_numpy=True)
                distances, indices = self.index.search(query_vector, top_k)

                results = []
                # FAISS pads with -1 when fewer than top_k neighbours exist;
                # the original `idx < len(...)` check let -1 index the LAST unit.
                for dist, idx in zip(distances[0], indices[0]):
                    if 0 <= idx < len(self.unit_ids):
                        unit_id = self.unit_ids[idx]
                        results.append({
                            'unit_id': unit_id,
                            'content': self.unit_contents.get(unit_id, ""),
                            # NOTE(review): 1 - distance assumes distances lie in
                            # [0, 1]; for a raw L2 index this can go negative —
                            # confirm the index metric.
                            'score': float(1 - dist),
                            'source': 'semantic'
                        })
                return results

            elif hasattr(self, 'vectorizer') and self.tfidf_vectors is not None:
                # TF-IDF fallback path.
                query_vec = self.vectorizer.transform([query])
                similarities = cosine_similarity(query_vec, self.tfidf_vectors)
                top_indices = similarities.argsort()[0][-top_k:][::-1]

                results = []
                for idx in top_indices:
                    if idx < len(self.unit_ids):
                        unit_id = self.unit_ids[idx]
                        results.append({
                            'unit_id': unit_id,
                            'content': self.unit_contents.get(unit_id, ""),
                            'score': float(similarities[0][idx]),
                            'source': 'tfidf'
                        })
                return results

        except Exception as e:
            logger.error(f"检索失败: {e}")

        return []


class KeywordRetriever:
    """Keyword-based retrieval component (no result filtering)."""

    def __init__(self, data_dir: str = None):
        # Default matches the project's on-disk data layout.
        self.data_dir = data_dir or r"D:\sjysds\pythonProject1\data"
        self.keyword_index = {}   # keyword -> list of unit_ids
        self.unit_contents = {}   # unit_id -> core content text
        self._initialize()

    def _initialize(self):
        """Load the prebuilt keyword index and the knowledge texts."""
        try:
            index_path = os.path.join(self.data_dir, "keyword_index.pkl")
            if os.path.exists(index_path):
                with open(index_path, 'rb') as f:
                    self.keyword_index = pickle.load(f)
                logger.info("关键词索引加载成功")

            self._load_knowledge_contents()

        except Exception as e:
            logger.error(f"关键词检索器初始化失败: {e}")

    def _load_knowledge_contents(self):
        """Populate unit_contents from the JSONL knowledge file (best effort)."""
        try:
            knowledge_file = os.path.join(self.data_dir, "knowledge_units.jsonl")
            if not os.path.exists(knowledge_file):
                return

            with open(knowledge_file, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        unit = json.loads(line.strip())
                        self.unit_contents[unit['unit_id']] = unit['core_content']
                    except (json.JSONDecodeError, KeyError):
                        # Tolerate malformed or incomplete lines.
                        continue
        except Exception as e:
            logger.warning(f"加载知识内容失败: {e}")

    def extract_keywords(self, text: str, top_n: int = 10) -> List[str]:
        """Extract up to *top_n* keywords via jieba TF-IDF, with a regex fallback."""
        try:
            return jieba.analyse.extract_tags(text, topK=top_n, withWeight=False)
        except Exception:
            # Fallback tokenizer: CJK runs of 2+ chars or Latin words of 3+
            # letters. Note: set() makes the fallback order unspecified.
            words = re.findall(r'[\u4e00-\u9fa5]{2,}|[A-Za-z]{3,}', text)
            return list(set(words))[:top_n]

    def retrieve(self, query: str, top_k: int = 3) -> List[Dict]:
        """Return up to *top_k* units whose indexed keywords appear in *query*.

        Every hit gets the same heuristic score (0.7); no filtering is applied.
        """
        keywords = self.extract_keywords(query)

        results = []
        seen_units = set()

        for keyword in keywords:
            for unit_id in self.keyword_index.get(keyword, ()):
                if unit_id in seen_units or unit_id not in self.unit_contents:
                    continue
                results.append({
                    'unit_id': unit_id,
                    'content': self.unit_contents[unit_id],
                    'score': 0.7,  # fixed heuristic score for keyword matches
                    'source': 'keyword',
                    'matched_keyword': keyword
                })
                seen_units.add(unit_id)
                if len(results) >= top_k:
                    # The original only broke the inner loop, so it kept
                    # appending one extra unit per remaining keyword (then
                    # truncated). Stop both loops as soon as the quota is met.
                    return results

        return results


class RAGService:
    """RAG service facade: real-time knowledge retrieval for model prediction.

    No filtering is applied to retrieved results. Combines a semantic
    retriever and a keyword retriever (depending on *mode*) behind an LRU
    response cache.
    """

    def __init__(self, mode: str = "hybrid", data_dir: str = None):
        # mode selects which retrievers run: "semantic", "keyword" or "hybrid".
        self.mode = mode
        self.data_dir = data_dir or r"D:\sjysds\pythonProject1\data"
        self.semantic_retriever = None
        self.keyword_retriever = None
        self.cache = LRUCache(maxsize=1000)
        self._initialize_components()
        self._warmup_cache()

    def _initialize_components(self):
        """Build the semantic and keyword retrievers (best effort)."""
        try:
            self.semantic_retriever = SemanticRetriever(self.data_dir)
            self.keyword_retriever = KeywordRetriever(self.data_dir)
            logger.info("检索组件初始化完成")
        except Exception as e:
            logger.error(f"组件初始化失败: {e}")

    def _warmup_cache(self):
        """Pre-populate the cache with common question patterns."""
        common_queries = [
            "5G技术特点", "LTE切换参数", "基站故障处理",
            "网络优化方法", "KPI指标计算", "射频设备参数"
        ]

        for query in common_queries:
            self.retrieve_for_question(query)
        logger.info("缓存预热完成")

    def _fuse_results(self, results_dict: Dict[str, List[Dict]], top_k: int = 3) -> List[Dict]:
        """Merge multi-source results: dedupe by unit_id, highest score first."""
        all_results = []
        # Only the result lists matter here, not which source produced them.
        for results in results_dict.values():
            all_results.extend(results)

        seen_units = set()
        fused_results = []

        # Highest score first; the first occurrence of a unit_id wins.
        for result in sorted(all_results, key=lambda x: x['score'], reverse=True):
            if result['unit_id'] not in seen_units:
                fused_results.append(result)
                seen_units.add(result['unit_id'])

                if len(fused_results) >= top_k:
                    break

        return fused_results

    def retrieve_for_question(self, question: str, top_k: int = 3) -> Dict:
        """
        Retrieve knowledge relevant to *question* (completely unfiltered).

        Returns: {knowledge: fused result list, sources, response_time,
        query, retained_count}. Responses are memoized in the LRU cache.
        """
        # Key on the question text itself: hash() is randomized per process
        # and can collide, which would serve another question's cached answer.
        cache_key = f"q_{question}"
        if cached := self.cache.get(cache_key):
            return cached

        start_time = time.time()
        results_dict = {}

        # Multi-source retrieval, gated by the configured mode.
        if self.mode in ["semantic", "hybrid"] and self.semantic_retriever:
            results_dict["semantic"] = self.semantic_retriever.retrieve(question, top_k)

        if self.mode in ["keyword", "hybrid"] and self.keyword_retriever:
            results_dict["keyword"] = self.keyword_retriever.retrieve(question, top_k)

        # Fuse and deduplicate across sources.
        fused_results = self._fuse_results(results_dict, top_k)

        response = {
            "knowledge": fused_results,
            "sources": list(results_dict.keys()),
            "response_time": time.time() - start_time,
            "query": question,
            "retained_count": len(fused_results)
        }

        self.cache.put(cache_key, response)
        return response

    def retrieve_for_option(self, question: str, option: str, option_label: str, top_k: int = 2) -> Dict:
        """Retrieve knowledge for a specific answer option.

        Builds a combined query of question + labelled option text.
        """
        combined_query = f"{question} 选项{option_label}: {option}"
        return self.retrieve_for_question(combined_query, top_k)

    def retrieve_for_question_option_pair(self, question: str, options: Dict[str, str]) -> Dict:
        """Retrieve knowledge for the question paired with every non-empty option."""
        results = {}
        for opt_label, opt_content in options.items():
            if opt_content and opt_content.strip():  # skip blank options
                results[opt_label] = self.retrieve_for_option(question, opt_content, opt_label)

        return results

    def batch_retrieve(self, questions: List[str]) -> List[Dict]:
        """Retrieve for many questions concurrently (I/O-bound fan-out)."""
        with ThreadPoolExecutor(max_workers=4) as executor:
            results = list(executor.map(
                self.retrieve_for_question, questions
            ))
        return results

    def get_retrieval_stats(self) -> Dict:
        """Expose cache statistics for monitoring."""
        return {
            "cache_hit_rate": self.cache.hit_rate(),
            "cache_size": len(self.cache.cache),
            "cache_hits": self.cache.hits,
            "cache_misses": self.cache.misses
        }


def clean_content_basic(content: str) -> str:
    """
    Lightly clean retrieved text for display (optional post-processing).

    Collapses runs of three or more repeated punctuation marks into a single
    space and normalizes whitespace. Never filters or drops content; empty
    or falsy input yields "".
    """
    if not content:
        return ""

    # Runs of >=3 punctuation chars are layout noise, not content.
    collapsed = re.sub(r'[\.\,\;\:\-\+]{3,}', ' ', content)
    return re.sub(r'\s+', ' ', collapsed).strip()


def enhance_with_knowledge(rag_service: RAGService, question: str, options: Dict[str, str]) -> Dict[str, str]:
    """
    Build knowledge-augmented model inputs for each non-empty option
    (no filtering of the retrieved knowledge).
    """
    # 1. Retrieve knowledge for the question paired with every option.
    knowledge = rag_service.retrieve_for_question_option_pair(question, options)

    # 2. Assemble one augmented prompt per non-empty option.
    enhanced_inputs = {}
    for opt_label, opt_content in options.items():
        if not (opt_content and opt_content.strip()):
            continue

        knowledge_items = knowledge.get(opt_label, {}).get('knowledge', [])

        # Keep at most the first two retrieved snippets, lightly cleaned
        # for display (no content is filtered out).
        cleaned_texts = [clean_content_basic(item['content']) for item in knowledge_items[:2]]
        knowledge_text = "；".join(cleaned_texts)

        enhanced_inputs[opt_label] = f"""问题: {question}
选项{opt_label}: {opt_content}
相关知识: {knowledge_text}
请判断该选项是否正确。"""

    return enhanced_inputs


# Usage example
if __name__ == "__main__":
    # Initialize the RAG service (hybrid = semantic + keyword retrieval)
    rag_service = RAGService(mode="hybrid")

    # Sample question and candidate options
    question = "5G网络中的Massive MIMO技术有什么特点？"
    options = {
        "A": "使用大量天线提高容量",
        "B": "降低网络覆盖范围",
        "C": "增加终端设备功耗",
        "D": "减少频谱效率"
    }

    # Retrieve knowledge for the question itself
    knowledge = rag_service.retrieve_for_question(question)
    print("问题相关知识:")
    for i, item in enumerate(knowledge['knowledge'][:3], 1):
        print(f"{i}. {item['content'][:100]}... (分数: {item['score']:.3f}, 来源: {item.get('source', 'unknown')})")

    # Show retrieval statistics
    print(f"\n检索统计: 保留 {knowledge['retained_count']} 个知识片段")

    # Retrieve knowledge for a single option
    option_knowledge = rag_service.retrieve_for_option(question, options["A"], "A")
    print(f"\n选项A相关知识: {option_knowledge['knowledge'][0]['content'][:100]}...")

    # Cache statistics
    stats = rag_service.get_retrieval_stats()
    print(f"\n缓存统计: 命中率 {stats['cache_hit_rate']:.2%}")

    # Knowledge-enhanced model inputs
    enhanced_inputs = enhance_with_knowledge(rag_service, question, options)
    print(f"\n增强后的输入示例（选项A）:")
    print(enhanced_inputs["A"][:200] + "...")