import json
import os
import numpy as np
import jieba
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from zhipuai import ZhipuAI



class QuestionSimilaritySearch:
    """Similarity search engine for standard (canonical) questions.

    Combines three signals to retrieve questions similar to a user query:
      * Elasticsearch full-text retrieval (IK analyzer for Chinese text),
      * dense-vector similarity using ZhipuAI GLM embeddings,
      * KMeans cluster membership, used to boost results from the
        dominant cluster among the candidates.
    """

    # Legacy fallback credential, kept only for backward compatibility.
    # SECURITY NOTE(review): a credential should not live in source control;
    # pass glm_api_key explicitly or set the GLM_API_KEY environment variable.
    _DEFAULT_GLM_API_KEY = "e0419f348adc2d6c02d1928f29519bd7.Gnlz8d8EhVLhALlN"

    def __init__(self, es_hosts=None,
                 username="elastic",
                 password="WlkO3gkQsjUvIPhamhqc", index_name="standard_questions",
                 vector_dim=512, batch_size=100, glm_api_key=None):
        """Initialize the similarity search engine.

        Parameters:
            es_hosts: list of Elasticsearch host URLs; defaults to
                ["http://172.16.226.131:9200"] when None (avoids a mutable
                default argument).
            username: Elasticsearch basic-auth username.
            password: Elasticsearch basic-auth password.
            index_name: name of the index that stores the questions.
            vector_dim: dimensionality of the embedding vectors; also passed
                to the embedding API so vectors always match the index
                mapping (default 512).
            batch_size: number of questions indexed per bulk batch.
            glm_api_key: ZhipuAI GLM API key; falls back to the GLM_API_KEY
                environment variable, then to the legacy built-in key.
        """
        if es_hosts is None:
            es_hosts = ["http://172.16.226.131:9200"]
        self.es = Elasticsearch(es_hosts, http_auth=(username, password))
        self.index_name = index_name
        self.vector_dim = vector_dim
        self.batch_size = batch_size
        self.stopwords = self._load_stopwords()
        # Create the index up front so later writes/searches cannot fail on
        # a missing index.
        self.create_index_if_not_exists()
        self.glm_api_key = (glm_api_key
                            or os.environ.get("GLM_API_KEY")
                            or self._DEFAULT_GLM_API_KEY)

    def _load_stopwords(self):
        """Load the stopword list.

        Tries ``stopwords.txt`` (one word per line, UTF-8) in the current
        working directory; falls back to a small built-in Chinese list when
        the file is missing.

        Returns:
            set[str]: the stopwords.
        """
        stopwords = set()
        try:
            with open('stopwords.txt', 'r', encoding='utf-8') as f:
                for line in f:
                    stopwords.add(line.strip())
        except FileNotFoundError:
            # Minimal default list of common Chinese function words.
            default_stopwords = ['的', '了', '和', '是', '在', '我', '有', '不', '这', '那', '就', '都']
            stopwords = set(default_stopwords)
        return stopwords

    def create_index_if_not_exists(self):
        """Create the Elasticsearch index if it does not already exist.

        The mapping uses the IK analyzer for Chinese text fields and a
        ``dense_vector`` field sized to ``self.vector_dim`` for embeddings.
        Requires the IK analysis plugin to be installed on the cluster.
        """
        if not self.es.indices.exists(index=self.index_name):
            mapping = {
                "mappings": {
                    "properties": {
                        "question_id": {"type": "keyword"},
                        "question_text": {
                            "type": "text",
                            # Chinese text: use the IK smart analyzer for both
                            # indexing and search.
                            "analyzer": "ik_smart",
                            "search_analyzer": "ik_smart",
                            # Keep an untokenized copy for exact matching.
                            "fields": {
                                "keyword": {"type": "keyword", "ignore_above": 256}
                            }
                        },
                        "question_vector": {"type": "dense_vector", "dims": self.vector_dim},
                        "category": {"type": "keyword"},
                        "tags": {"type": "keyword"},
                        "synonyms": {"type": "text", "analyzer": "ik_smart"},
                        "cluster_id": {"type": "integer"}
                    }
                },
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0,
                    "analysis": {
                        "analyzer": {
                            "my_analyzer": {
                                "type": "custom",
                                "tokenizer": "standard",
                                "filter": ["lowercase", "stop"]
                            }
                        }
                    }
                }
            }
            self.es.indices.create(index=self.index_name, body=mapping)
            print(f"创建索引: {self.index_name}")

    def tokenize_text(self, text, is_chinese=True, remove_stopwords=True):
        """Tokenize text, using jieba for Chinese input.

        Parameters:
            text: input text to tokenize.
            is_chinese: when True, segment with jieba; otherwise split on
                whitespace.
            remove_stopwords: when True, drop tokens found in
                ``self.stopwords``.

        Returns:
            list[str]: the tokens.
        """
        if is_chinese:
            tokens = list(jieba.cut(text))
        else:
            tokens = text.split()

        if remove_stopwords:
            tokens = [t for t in tokens if t not in self.stopwords]

        return tokens

    def get_gml_embedding(self, texts):
        """Fetch text embeddings from the ZhipuAI GLM API.

        Parameters:
            texts: a single string or a list of strings.

        Returns:
            list[list[float]]: one embedding per input text, each of
            dimension ``self.vector_dim``.

        Raises:
            ValueError: when no API key is configured.
        """
        if self.glm_api_key is None:
            raise ValueError("缺少 API密钥")

        # Normalize a single string into a one-element list.
        if isinstance(texts, str):
            texts = [texts]

        client = ZhipuAI(api_key=self.glm_api_key)
        response = client.embeddings.create(
            model="embedding-3",
            # Use the configured dimension so returned vectors always match
            # the dense_vector mapping (was hard-coded to 512).
            dimensions=self.vector_dim,
            input=texts,
        )
        return [data.embedding for data in response.data]

    def expand_query(self, query_text, expansion_method='synonyms', top_n=3):
        """Expand a query with synonyms or related terms.

        Parameters:
            query_text: original query text.
            expansion_method: expansion strategy (currently only
                'synonyms', which is a placeholder).
            top_n: number of synonyms/related words to add per token
                (unused until a synonym resource is integrated).

        Returns:
            str: the expanded query (tokens joined by spaces). Note that
            deduplication via ``set`` does not preserve token order.
        """
        tokens = self.tokenize_text(query_text)
        expanded_tokens = tokens.copy()

        if expansion_method == 'synonyms':
            # Placeholder: a real implementation should integrate a Chinese
            # synonym resource (e.g. the HIT synonym thesaurus, Cilin).
            pass

        # Deduplicate tokens.
        expanded_tokens = list(set(expanded_tokens))
        expanded_query = ' '.join(expanded_tokens)

        return expanded_query

    def get_synonyms_for_questions(self, questions_data):
        """Attach synonyms to each question (simplified placeholder).

        Currently uses the question's own tokens as its "synonyms"; a real
        implementation should substitute a proper synonym dictionary.

        Parameters:
            questions_data: list of question dicts (mutated in place).

        Returns:
            list[dict]: the same list with a ``synonyms`` key per question.
        """
        for q in questions_data:
            tokens = self.tokenize_text(q["question_text"])
            q["synonyms"] = tokens

        return questions_data

    def cluster_questions(self, questions_data, vectors, n_clusters=5):
        """Cluster questions by their embedding vectors using KMeans.

        Parameters:
            questions_data: list of question dicts (mutated in place).
            vectors: embedding vectors aligned with ``questions_data``.
            n_clusters: desired cluster count; capped at the number of
                questions.

        Returns:
            list[dict]: the same list with an integer ``cluster_id`` per
            question. An empty list is returned unchanged.
        """
        # Guard: nothing to cluster (previously raised IndexError).
        if not questions_data:
            return questions_data

        vectors = np.array(vectors)

        # Never ask for more clusters than there are questions.
        actual_n_clusters = min(n_clusters, len(questions_data))

        if len(questions_data) > 1:
            kmeans = KMeans(n_clusters=actual_n_clusters, random_state=42)
            clusters = kmeans.fit_predict(vectors)

            for i, q in enumerate(questions_data):
                q["cluster_id"] = int(clusters[i])
        else:
            # A single question trivially belongs to cluster 0.
            questions_data[0]["cluster_id"] = 0

        return questions_data

    def add_standard_questions(self, questions_data, cluster_questions=True):
        """Bulk-index standard questions into Elasticsearch.

        For each batch: embed the texts, attach synonyms, optionally
        cluster, then bulk-index and refresh so documents are immediately
        searchable.

        Parameters:
            questions_data: list of question dicts with keys:
                            - question_id
                            - question_text
                            - category (optional)
                            - tags (optional, list)
            cluster_questions: whether to run KMeans clustering per batch.
        """
        total_questions = len(questions_data)
        print(f"开始处理 {total_questions} 个问题")

        # Process in batches to bound memory and API payload sizes.
        for i in range(0, total_questions, self.batch_size):
            batch = questions_data[i:i + self.batch_size]
            batch_size = len(batch)
            print(f"处理批次 {i // self.batch_size + 1}, 共 {batch_size} 个问题")

            texts = [q["question_text"] for q in batch]

            print("获取GML嵌入向量...")
            vectors = self.get_gml_embedding(texts)

            batch = self.get_synonyms_for_questions(batch)

            if cluster_questions and batch_size > 1:
                batch = self.cluster_questions(batch, vectors)
            elif cluster_questions:
                # Single-question batch: assign cluster 0 directly.
                batch[0]["cluster_id"] = 0

            # Build the bulk actions; document _id is the question_id so
            # re-indexing the same question overwrites rather than duplicates.
            actions = []
            for j, q in enumerate(batch):
                doc = {
                    "question_id": q["question_id"],
                    "question_text": q["question_text"],
                    "question_vector": vectors[j],
                    "category": q.get("category", ""),
                    "tags": q.get("tags", []),
                    "synonyms": q.get("synonyms", []),
                    "cluster_id": q.get("cluster_id", 0)
                }

                action = {
                    "_index": self.index_name,
                    "_id": q["question_id"],
                    "_source": doc
                }
                actions.append(action)

            if actions:
                # helpers.bulk returns (success_count, error_list).
                success, failed = bulk(self.es, actions)
                print(f"已索引 {success} 个文档, {len(failed) if failed else 0} 个失败")

            # Refresh so the batch is searchable immediately.
            self.es.indices.refresh(index=self.index_name)

            processed = min(i + self.batch_size, total_questions)
            print(f"已处理 {processed}/{total_questions} 个问题 ({processed / total_questions * 100:.1f}%)")

    def search_similar_questions(self, query_text, top_k=5, min_score=0.5,
                                 expand_query=True, search_method='hybrid'):
        """Search for questions similar to ``query_text``.

        Parameters:
            query_text: the query question text.
            top_k: maximum number of results to return.
            min_score: minimum score threshold. NOTE(review): the score
                scale differs per method (raw ES relevance, cosine in
                [-1, 1], or a 50/50 blend), so a single threshold does not
                mean the same thing across methods.
            expand_query: whether to expand the query first.
            search_method: 'elasticsearch', 'vector', or 'hybrid'.

        Returns:
            list[dict]: candidate questions with scores, best first.
        """
        # Optional query expansion.
        if expand_query:
            expanded_query = self.expand_query(query_text)
            print(f"原始查询: {query_text}")
            print(f"扩展查询: {expanded_query}")
        else:
            expanded_query = query_text

        candidates = []

        # Stage 1: Elasticsearch full-text search.
        if search_method in ['elasticsearch', 'hybrid']:
            es_query = {
                "query": {
                    "bool": {
                        "should": [
                            {
                                "multi_match": {
                                    "query": expanded_query,
                                    "fields": ["question_text", "synonyms"],
                                    "fuzziness": "AUTO",
                                    "boost": 2.0
                                }
                            },
                            {
                                "match": {
                                    "question_text": {
                                        "query": query_text,
                                        "boost": 1.0
                                    }
                                }
                            }
                        ]
                    }
                },
                # Over-fetch so post-filtering still leaves top_k results.
                "size": top_k * 2
            }

            response = self.es.search(index=self.index_name, body=es_query)

            for hit in response["hits"]["hits"]:
                candidates.append({
                    "question_id": hit["_source"]["question_id"],
                    "question_text": hit["_source"]["question_text"],
                    "category": hit["_source"].get("category", ""),
                    "tags": hit["_source"].get("tags", []),
                    "cluster_id": hit["_source"].get("cluster_id", 0),
                    "score": hit["_score"],
                    "vector": hit["_source"]["question_vector"]
                })

        # Stage 2: vector similarity.
        if search_method in ['vector', 'hybrid']:
            query_vector = self.get_gml_embedding(query_text)[0]

            # Hybrid with existing candidates: re-score them as a 50/50 mix
            # of the ES relevance score and cosine similarity.
            if search_method == 'hybrid' and candidates:
                for c in candidates:
                    doc_vector = c["vector"]
                    similarity = cosine_similarity([query_vector], [doc_vector])[0][0]
                    c["score"] = 0.5 * c["score"] + 0.5 * similarity

                candidates.sort(key=lambda x: x["score"], reverse=True)
            else:
                # Pure vector search via script_score; +1.0 keeps the script
                # score non-negative as Elasticsearch requires.
                script_query = {
                    "script_score": {
                        "query": {"match_all": {}},
                        "script": {
                            "source": "cosineSimilarity(params.query_vector, 'question_vector') + 1.0",
                            "params": {"query_vector": query_vector}
                        }
                    }
                }

                response = self.es.search(
                    index=self.index_name,
                    body={
                        "size": top_k,
                        "query": script_query,
                        "_source": ["question_id", "question_text", "category", "tags", "cluster_id", "question_vector"]
                    }
                )

                candidates = []
                for hit in response["hits"]["hits"]:
                    candidates.append({
                        "question_id": hit["_source"]["question_id"],
                        "question_text": hit["_source"]["question_text"],
                        "category": hit["_source"].get("category", ""),
                        "tags": hit["_source"].get("tags", []),
                        "cluster_id": hit["_source"].get("cluster_id", 0),
                        # Undo the +1.0 shift to recover cosine in [-1, 1].
                        "score": hit["_score"] - 1.0,
                        "vector": hit["_source"]["question_vector"]
                    })

        # Stage 3: cluster-based boosting of the dominant cluster.
        if candidates:
            cluster_counts = {}
            for c in candidates:
                cluster_id = c.get("cluster_id", 0)
                cluster_counts[cluster_id] = cluster_counts.get(cluster_id, 0) + 1

            if cluster_counts:
                most_common_cluster = max(cluster_counts.items(), key=lambda x: x[1])[0]

                # Boost members of the most common cluster by 20%.
                for c in candidates:
                    if c.get("cluster_id", 0) == most_common_cluster:
                        c["score"] *= 1.2

            candidates.sort(key=lambda x: x["score"], reverse=True)

            # Filter by the minimum score, then truncate to top_k.
            candidates = [c for c in candidates if c["score"] >= min_score]
            candidates = candidates[:top_k]

        return candidates

    def delete_index(self):
        """Delete the Elasticsearch index if it exists."""
        if self.es.indices.exists(index=self.index_name):
            self.es.indices.delete(index=self.index_name)
            print(f"已删除索引: {self.index_name}")

    def load_questions_from_file(self, file_path):
        """Load question data from a file (JSON only).

        Parameters:
            file_path: path to a JSON file containing a list of question
                dicts.

        Returns:
            list[dict]: the parsed question data.

        Raises:
            FileNotFoundError: when the file does not exist.
            ValueError: for unsupported file extensions.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        _, ext = os.path.splitext(file_path)

        if ext.lower() == '.json':
            with open(file_path, 'r', encoding='utf-8') as f:
                questions_data = json.load(f)
            return questions_data
        else:
            raise ValueError(f"不支持的文件格式: {ext}")


# Usage example / demo driver
if __name__ == "__main__":
    # Build the search engine (credentials and hosts use the class defaults).
    engine = QuestionSimilaritySearch(
        batch_size=50  # index 50 questions per batch
    )

    # Alternatively, load questions from a JSON file:
    # questions_data = engine.load_questions_from_file("questions.json")

    # Sample batch 1: account/password questions.
    password_questions = [
        {"question_id": "q1",
         "question_text": "如何重置我的账户密码？",
         "category": "account",
         "tags": ["password", "reset"]},
        {"question_id": "q2",
         "question_text": "账户密码忘记了怎么办？",
         "category": "account",
         "tags": ["password", "forgot"]},
        {"question_id": "q3",
         "question_text": "如何更改我的登录密码？",
         "category": "account",
         "tags": ["password", "change"]},
        {"question_id": "q4",
         "question_text": "忘记密码后如何找回？",
         "category": "account",
         "tags": ["password", "recovery"]},
        {"question_id": "q5",
         "question_text": "我想修改我的密码，怎么操作？",
         "category": "account",
         "tags": ["password", "modify"]},
    ]

    # Index batch 1 (uncomment to run):
    # engine.add_standard_questions(password_questions)

    # Sample batch 2: payment/refund questions.
    refund_questions = [
        {"question_id": "q6",
         "question_text": "订单退款需要多长时间？",
         "category": "payment",
         "tags": ["refund", "order", "time"]},
        {"question_id": "q7",
         "question_text": "我的退款什么时候能到账？",
         "category": "payment",
         "tags": ["refund", "timing"]},
        {"question_id": "q8",
         "question_text": "退款流程需要走哪些步骤？",
         "category": "payment",
         "tags": ["refund", "process"]},
        {"question_id": "q9",
         "question_text": "如何查询我的退款状态？",
         "category": "payment",
         "tags": ["refund", "status", "check"]},
        {"question_id": "q10",
         "question_text": "取消订单后钱会自动退回吗？",
         "category": "payment",
         "tags": ["refund", "cancel", "automatic"]},
    ]
    # engine.add_standard_questions(refund_questions)

    # Run a sample similarity search.
    # query = "我的密码忘了，怎么重新设置"
    query = "账户密码忘记了怎么办？"
    matches = engine.search_similar_questions(
        query,
        top_k=5,
        search_method='hybrid',
    )

    print(f"查询: {query}")
    print("相似问题:")
    for i, q in enumerate(matches):
        print(f"{i + 1}. {q['question_text']} (分数: {q['score']:.4f}, 聚类: {q['cluster_id']})")
