import json
from datetime import datetime

from langchain.schema import Document as LCDocument  # LangChain Document
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_chroma import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import word_document
import os
import re
from langchain_experimental.text_splitter import SemanticChunker
from PyPDF2 import PdfReader
from docx import Document as WordDocument  # Word文档处理类
from langchain.chains import RetrievalQA
from elasticsearch import Elasticsearch
from typing import List, Dict, Union, Optional
from langchain_community.vectorstores import ElasticsearchStore


class VectorStoreEvaluator:
    """Retrieval-quality evaluator for vector-store search results.

    With ground truth it computes exact precision/recall/F1 over document
    (paragraph) IDs; without it, it falls back to an approximate precision
    based on query-term overlap with the retrieved text.
    """

    def __init__(self, ground_truth: Optional[dict] = None):
        """
        Initialize the evaluator.

        Args:
            ground_truth: optional mapping of query -> list of relevant
                document IDs, e.g. {query: [doc_id1, doc_id2]}.
        """
        self.ground_truth = ground_truth or {}

    def evaluate_retrieval(self, query: str, documents: "list[LCDocument]") -> dict:
        """
        Evaluate the quality of one retrieval result.

        Args:
            query: the query string.
            documents: retrieved documents (LangChain Document objects);
                relevance is judged by each document's
                metadata["paragraph_id"].

        Returns:
            dict with keys:
                "precision": float  # fraction of retrieved docs that are relevant
                "recall": float     # fraction of relevant docs that were retrieved
                "f1": float         # harmonic mean of precision and recall
                "feedback": str     # improvement suggestion (user-facing, Chinese)
        """
        if not documents:
            return {
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
                "feedback": "未检索到相关内容"
            }

        # Collect the retrieved document IDs, skipping documents that carry
        # no "paragraph_id" (previously a missing ID leaked None into the
        # set and silently distorted precision).
        retrieved_ids = {
            pid for doc in documents
            if (pid := doc.metadata.get("paragraph_id")) is not None
        }

        if query in self.ground_truth:
            # Exact metrics against the labelled relevant IDs.
            relevant_ids = set(self.ground_truth[query])
            true_positives = len(retrieved_ids & relevant_ids)

            # The guards are live: retrieved_ids can be empty when every
            # document lacked a paragraph_id.
            precision = true_positives / len(retrieved_ids) if retrieved_ids else 0.0
            recall = true_positives / len(relevant_ids) if relevant_ids else 0.0
            f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0

            return {
                "precision": round(precision, 4),
                "recall": round(recall, 4),
                "f1": round(f1, 4),
                "feedback": self._generate_feedback(precision, recall)
            }

        # No ground truth: approximate precision as the mean fraction of
        # query terms present in each retrieved document.
        query_terms = set(query.lower().split())
        total_precision = sum(
            (len(query_terms & set(doc.page_content.lower().split())) / len(query_terms))
            if query_terms else 0.0
            for doc in documents
        )
        # documents is non-empty here (guarded by the early return above).
        avg_precision = total_precision / len(documents)

        return {
            "precision": round(avg_precision, 4),
            "recall": 0.0,  # recall is undefined without ground truth
            "f1": 0.0,  # F1 is undefined without ground truth
            "feedback": "无标准答案，仅基于词频计算近似准确率"
        }

    def _generate_feedback(self, precision: float, recall: float) -> str:
        """Map a precision/recall pair onto a human-readable improvement hint."""
        if precision < 0.5 and recall < 0.5:
            return "检索质量较差，建议优化查询词和向量化模型"
        elif precision < 0.7 and recall >= 0.7:
            return "召回率良好但准确率不足，建议优化排序算法或增加过滤条件"
        elif precision >= 0.7 and recall < 0.7:
            return "准确率良好但召回率不足，建议扩展查询词或调整检索阈值"
        elif precision >= 0.8 and recall >= 0.8:
            return "检索质量优秀"
        else:
            return "检索质量良好"


class VectorStoreTools:
    """Document ingestion, vector storage, and hybrid (keyword + semantic)
    retrieval utilities built on Chroma and Elasticsearch.

    The instance is bound to a single source file (``path``); documents are
    passed through a sensitive-word filter before indexing.

    NOTE(review): several imports at the top of the file (word_document,
    PdfReader, RecursiveCharacterTextSplitter, RetrievalQA,
    ElasticsearchStore) are unused by this class as written.
    """
    def __init__(self, path: str):
        """
        Build the tool set for one source file.

        Args:
            path: path to the document to ingest (docx/json/txt supported).

        Raises:
            FileNotFoundError: if ``path`` does not exist.
        """
        # Embedding model used for all vectorization (DashScope remote API;
        # key read from the DASHSCOPE_API_KEY environment variable).
        self.embeddings = DashScopeEmbeddings(
            model="text-embedding-v3",
            dashscope_api_key=os.getenv("DASHSCOPE_API_KEY")
        )

        # Dedicated vector store for generated insurance contracts.
        self.contract_store = Chroma(
            collection_name="insurance_contracts",
            embedding_function=self.embeddings,
            persist_directory="./contracts_db"
        )

        # Semantic splitter: breaks text where embedding similarity drops
        # past the 95th-percentile threshold.
        self.text_splitter = SemanticChunker(
            embeddings=self.embeddings,
            breakpoint_threshold_type="percentile",
            breakpoint_threshold_amount=95
        )
        # Cache of vector stores keyed by caller-supplied name.
        self.vector_stores = {}
        # Path of the source document.
        self.path = path
        # Sensitive-word blocklist; matches are masked before indexing.
        self.sensitive_words = [
            "反政府", "颠覆国家", "分裂国家", "邪教",
            "暴力", "恐怖主义", "色情", "赌博",
            "毒品", "诈骗", "仇恨言论", "种族歧视"
        ]
        self.replacement_char = "*"

        # Elasticsearch client (host/credentials from environment variables).
        self.es_client = Elasticsearch(
            hosts=[os.getenv("ES_HOST", "http://localhost:9200")],
            basic_auth=(os.getenv("ES_USERNAME", "elastic"),
                        os.getenv("ES_PASSWORD", ""))
        )
        # Hybrid-search configuration.
        self.hybrid_index_name = "hybrid_documents"
        self.hybrid_search_ratio = 0.5  # weight of the semantic leg in hybrid fusion
        if not os.path.exists(self.path):
            raise FileNotFoundError(f"文档文件未找到: {self.path}")


    def evaluate_retrieval(self, query: str, key: str = 'hetong') -> dict:
        """Evaluate retrieval quality for ``query`` against the store at ``key``.

        NOTE(review): a fresh VectorStoreEvaluator with no ground truth is
        created on every call, so this always takes the approximate
        (term-overlap) evaluation path.
        """
        retriever = self.get_retriever(key)
        # NOTE(review): get_relevant_documents is deprecated in newer
        # LangChain releases in favour of retriever.invoke — confirm the
        # pinned LangChain version before migrating.
        documents = retriever.get_relevant_documents(query)
        return VectorStoreEvaluator().evaluate_retrieval(query, documents)

    def filter_sensitive_content(self, text: str) -> str:
        """Mask every sensitive word in ``text`` with '*' of equal length.

        Matching is case-insensitive and literal (words are regex-escaped).
        """
        for word in self.sensitive_words:
            pattern = re.compile(re.escape(word), re.IGNORECASE)
            replacement = self.replacement_char * len(word)
            text = pattern.sub(replacement, text)
        return text

    def get_document(self):
        """Load ``self.path``, mask sensitive words, and return semantic chunks.

        Supported extensions: docx, json, txt.

        Returns:
            list of chunked LangChain Documents.
            NOTE(review): the "pdf" branch only ``pass``es, so the method
            silently returns None for PDFs even though PdfReader is imported.

        Raises:
            ValueError: for any other file extension.
        """
        file_ext = os.path.splitext(self.path)[1][1:]
        if file_ext == "docx":
            # Process Word documents with python-docx.
            doc = WordDocument(self.path)
            full_text = []
            for para in doc.paragraphs:
                filtered_text = self.filter_sensitive_content(para.text)
                full_text.append(filtered_text)

            content = "\n".join(full_text)
            # Wrap the whole document in a single LangChain Document.
            # NOTE(review): paragraph_id is hard-coded to "1001" here.
            documents = [
                LCDocument(
                    page_content=content,
                    metadata={
                        "source": self.path,
                        "paragraph_id": "1001",
                        "file_type": "docx",
                        "content_filtered": "true"
                    }
                )
            ]
            return self.text_splitter.split_documents(documents)
        elif file_ext == "json":
            # JSON is expected to be a list of insurance records with keys
            # 条款/ID/产品名/问题/答案 — TODO confirm against the data source.
            with open(self.path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            documents = []
            for item in data:
                # Clause part of the record.
                filtered_clause = self.filter_sensitive_content(item["条款"])
                documents.append(LCDocument(
                    page_content=f"条款内容: {filtered_clause}",
                    metadata={
                        "source": self.path,
                        "paragraph_id": item["ID"],
                        "file_type": "insurance_clause",
                        "product": item["产品名"],
                        "content_type": "clause"
                    }
                ))

                # Q&A part of the record. NOTE(review): the answer text is
                # NOT passed through the sensitive-word filter.
                filtered_question = self.filter_sensitive_content(item["问题"])
                documents.append(LCDocument(
                    page_content=f"问题: {filtered_question}\n答案: {item['答案']}",
                    metadata={
                        "source": self.path,
                        "paragraph_id": f"{item['ID']}_qa",
                        "file_type": "insurance_qa",
                        "product": item["产品名"],
                        "content_type": "qa_pair"
                    }
                ))

            return self.text_splitter.split_documents(documents)

        elif file_ext == "pdf":
            # NOTE(review): unimplemented stub — falls through and returns None.
            pass

        elif file_ext == "txt":
            with open(self.path, 'r', encoding='utf-8') as f:
                try:
                    # First try the file as JSON: a list of
                    # {paragraph_id: text} mappings.
                    data = json.load(f)
                    documents = []
                    for item in data:
                        for para_id, text in item.items():
                            filtered_text = self.filter_sensitive_content(text)
                            documents.append(
                                LCDocument(
                                    page_content=filtered_text,
                                    metadata={
                                        "source": self.path,
                                        "paragraph_id": para_id,
                                        "file_type": "json_txt",
                                        "content_filtered": "true"
                                    }
                                )
                            )
                except json.JSONDecodeError:
                    # Not JSON: treat the file as plain text.
                    # NOTE(review): the file is reopened while the outer
                    # handle is still open; f.seek(0) would be simpler.
                    with open(self.path, 'r', encoding='utf-8') as f:
                        content = f.read()
                    filtered_content = self.filter_sensitive_content(content)
                    documents = [
                        LCDocument(
                            page_content=filtered_content,
                            metadata={
                                "source": self.path,
                                "paragraph_id": "1001",
                                "file_type": "plain_txt",
                                "content_filtered": "true"
                            }
                        )
                    ]
            return self.text_splitter.split_documents(documents)

        else:
            raise ValueError(f"不支持的文件类型: {file_ext}")

    def set_voctorstore(self, key):
        """Create a vector store for ``key`` and mirror the same docs to ES.

        NOTE(review): "voctorstore" is a typo kept for caller compatibility.
        Every key persists to the same "./chroma_db" directory, so distinct
        keys share one on-disk collection.
        """
        documents = self.get_document()
        vectorstore = Chroma.from_documents(
            documents=documents,
            embedding=self.embeddings,
            persist_directory="./chroma_db"
        )
        self.vector_stores[key] = vectorstore

        # Mirror the documents into Elasticsearch for hybrid search.
        self._index_to_es(documents)

        return vectorstore

    def get_voctorstore(self, key):
        """Return the cached store for ``key``, loading from disk when absent.

        NOTE(review): the on-disk load ignores ``key`` — it always opens
        "./chroma_db", consistent with set_voctorstore above.
        """
        if key in self.vector_stores:
            return self.vector_stores[key]
        vectorstore = Chroma(
            persist_directory='./chroma_db',
            embedding_function=self.embeddings
        )
        self.vector_stores[key] = vectorstore
        return vectorstore

    def get_retriever(self, key, k: int = 2):
        """Return a top-``k`` similarity retriever over the store at ``key``."""
        vectorstore = self.get_voctorstore(key)
        return vectorstore.as_retriever(search_kwargs={"k": k})

    def _index_to_es(self, documents: List[LCDocument]):
        """Bulk-index documents (content + embedding + metadata) into ES."""
        if not self.es_client.indices.exists(index=self.hybrid_index_name):
            # Create the index with an explicit mapping on first use.
            mapping = {
                "mappings": {
                    "properties": {
                        "content": {"type": "text"},
                        "embedding": {
                            "type": "dense_vector",
                            # NOTE(review): verify this matches the model's
                            # real output size — DashScope text-embedding-v3
                            # commonly emits 1024-dim vectors.
                            "dims": 1536,  # adjust to the actual embedding dimension
                            "index": True,
                            "similarity": "cosine"
                        },
                        "metadata": {
                            "type": "object",
                            "properties": {
                                "source": {"type": "keyword"},
                                "paragraph_id": {"type": "keyword"},
                                "file_type": {"type": "keyword"},
                                "content_filtered": {"type": "keyword"}
                            }
                        }
                    }
                }
            }
            self.es_client.indices.create(index=self.hybrid_index_name, body=mapping)

        # Build one bulk action per document.
        actions = []
        for doc in documents:
            # One embedding API call per document.
            embedding = self.embeddings.embed_query(doc.page_content)

            action = {
                "_index": self.hybrid_index_name,
                "_source": {
                    "content": doc.page_content,
                    "embedding": embedding,
                    "metadata": doc.metadata
                }
            }
            actions.append(action)

        # Submit everything in a single bulk request.
        from elasticsearch.helpers import bulk
        bulk(self.es_client, actions)

    def hybrid_search(self, query: str, k: int = 5) -> List[LCDocument]:
        """
        Run hybrid retrieval (keyword + semantic).

        Args:
            query: query string.
            k: number of results to return.

        Returns:
            List[LCDocument]: fused result documents, best first.
        """
        # 1. Keyword (full-text) leg.
        keyword_results = self._keyword_search(query, k)

        # 2. Semantic (kNN) leg.
        vector_results = self._vector_search(query, k)

        # 3. Fuse the two ranked lists.
        combined = self._combine_results(keyword_results, vector_results, k)

        return combined

    def _keyword_search(self, query: str, k: int) -> List[Dict]:
        """Full-text search over the "content" field; returns raw _source dicts."""
        body = {
            "query": {
                "multi_match": {
                    "query": query,
                    "fields": ["content"],
                    "type": "best_fields"
                }
            },
            "size": k
        }
        response = self.es_client.search(index=self.hybrid_index_name, body=body)
        return [hit["_source"] for hit in response["hits"]["hits"]]

    def _vector_search(self, query: str, k: int) -> List[Dict]:
        """Approximate kNN search over the "embedding" field; returns _source dicts."""
        # Embed the query with the same model used at index time.
        query_embedding = self.embeddings.embed_query(query)

        body = {
            "knn": {
                "field": "embedding",
                "query_vector": query_embedding,
                "k": k,
                "num_candidates": 100
            },
            "size": k
        }
        response = self.es_client.search(index=self.hybrid_index_name, body=body)
        return [hit["_source"] for hit in response["hits"]["hits"]]

    def _combine_results(self,
                         keyword_results: List[Dict],
                         vector_results: List[Dict],
                         k: int) -> List[LCDocument]:
        """
        Fuse keyword and vector result lists into one ranked list.

        Uses RRF (Reciprocal Rank Fusion), weighted by
        ``self.hybrid_search_ratio`` (weight of the vector leg).
        """
        # doc_id -> {content, metadata, keyword_rank, vector_rank}
        all_results = {}

        # Record keyword ranks (1-based; missing rank = +inf so its RRF
        # contribution is ~0).
        for rank, result in enumerate(keyword_results, 1):
            doc_id = result["metadata"]["paragraph_id"]
            if doc_id not in all_results:
                all_results[doc_id] = {
                    "content": result["content"],
                    "metadata": result["metadata"],
                    "keyword_rank": rank,
                    "vector_rank": float('inf')
                }

        # Record vector ranks, merging with keyword entries when present.
        for rank, result in enumerate(vector_results, 1):
            doc_id = result["metadata"]["paragraph_id"]
            if doc_id in all_results:
                all_results[doc_id]["vector_rank"] = rank
            else:
                all_results[doc_id] = {
                    "content": result["content"],
                    "metadata": result["metadata"],
                    "keyword_rank": float('inf'),
                    "vector_rank": rank
                }

        # Standard RRF: 1 / (rank + k), k=60 is the conventional constant.
        def rrf_score(rank, k=60):
            return 1 / (rank + k)

        # Weighted combination of the two RRF scores per document.
        scored_results = []
        for doc_id, doc_data in all_results.items():
            keyword_score = rrf_score(doc_data["keyword_rank"])
            vector_score = rrf_score(doc_data["vector_rank"])
            total_score = (1 - self.hybrid_search_ratio) * keyword_score + \
                          self.hybrid_search_ratio * vector_score

            scored_results.append({
                "doc": LCDocument(
                    page_content=doc_data["content"],
                    metadata=doc_data["metadata"]
                ),
                "score": total_score
            })

        # Sort by fused score, best first, and keep the top k.
        scored_results.sort(key=lambda x: x["score"], reverse=True)
        return [result["doc"] for result in scored_results[:k]]

    def add_contract_to_vectorstore(self, contract_data: Dict):
        """Store a generated contract in the contract vector store.

        Args:
            contract_data: nested contract dict; contract_info /
                applicant_info / insured_info sub-dicts are read for
                metadata (all fields optional, defaulting to "").

        Returns:
            dict with "status" ("success"/"error"), "message",
            "contract_id" and "stored_at"; never raises.
        """
        try:
            # Flatten selected fields into Chroma metadata.
            contract_info = contract_data.get("contract_info", {})
            metadata = {
                "type": "insurance_contract",
                "contract_id": contract_info.get("contract_id", ""),
                "applicant": contract_data.get("applicant_info", {}).get("name", ""),
                "insured": contract_data.get("insured_info", {}).get("name", ""),
                "product_type": contract_info.get("product_type", ""),
                "effective_date": contract_info.get("effective_date", ""),
                "expiry_date": contract_info.get("expiry_date", ""),
                "status": contract_info.get("status", "pending"),
                "stored_at": datetime.now().isoformat()
            }

            # The full contract is serialized as the document text.
            text_content = json.dumps(contract_data, ensure_ascii=False)

            # Persist into the dedicated contract store.
            self.contract_store.add_texts(
                texts=[text_content],
                metadatas=[metadata]
            )

            return {
                "status": "success",
                "message": "合同已存入数据库",
                "contract_id": metadata["contract_id"],
                "stored_at": metadata["stored_at"]
            }
        except Exception as e:
            # Best-effort API: report the failure instead of raising.
            return {
                "status": "error",
                "message": f"合同存储失败: {str(e)}",
                "contract_id": "",
                "stored_at": ""
            }
    def search_contracts(self, query: str, k: int = 5) -> List[Dict]:
        """
        Search stored insurance contracts (Chroma + ES, deduplicated).

        Args:
            query: query string.
            k: number of results to return.

        Returns:
            List[Dict]: merged results; empty list on any failure.

        NOTE(review): ``self._es_search`` is not defined anywhere in this
        class, so every call currently raises AttributeError, which is
        swallowed by the broad except below — the method always returns [].
        """
        try:
            # Semantic results from Chroma.
            chroma_results = self.contract_store.similarity_search(query, k=k)

            # Keyword results from ES (see NOTE above — undefined helper).
            es_results = self._es_search(
                query=query,
                index_name="insurance_contracts",
                k=k
            )

            # Merge, deduplicating by contract_id (Chroma wins ties).
            combined = []
            seen_ids = set()

            # Chroma results first.
            for doc in chroma_results:
                contract_id = doc.metadata.get("contract_id", "")
                if contract_id and contract_id not in seen_ids:
                    combined.append({
                        "source": "chroma",
                        "content": doc.page_content,
                        "metadata": doc.metadata,
                        "score": None
                    })
                    seen_ids.add(contract_id)

            # Then ES results.
            for hit in es_results:
                contract_id = hit["_source"]["metadata"].get("contract_id", "")
                if contract_id and contract_id not in seen_ids:
                    combined.append({
                        "source": "elasticsearch",
                        "content": hit["_source"]["content"],
                        "metadata": hit["_source"]["metadata"],
                        "score": hit["_score"]
                    })
                    seen_ids.add(contract_id)

            return combined[:k]

        except Exception as e:
            print(f"合同检索失败: {str(e)}")
            return []