# Standard library
import json
import os

# Third-party
import gensim
import jieba
import numpy as np
import synonyms  # Chinese synonym library
import torch
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from gensim.models import Word2Vec, KeyedVectors
from gensim.models.callbacks import CallbackAny2Vec  # fixed typo: was "gensim.modepils.callbacks"
from scipy.spatial.distance import pdist, squareform
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


class QuestionSimilaritySearch:
    def __init__(self, es_host="localhost", es_port=9200, index_name="standard_questions",
                 embedding_model="sentence-transformer", vector_dim=384):
        """
        Initialize the advanced similarity search engine.

        Parameters:
            es_host: Elasticsearch host
            es_port: Elasticsearch port
            index_name: name of the index that stores the questions
            embedding_model: embedding model type to use
                ('word2vec', 'fasttext', 'sentence-transformer').
                NOTE(review): 'fasttext' is listed here but has no branch
                in _init_embedding_model — confirm whether it is supported.
            vector_dim: embedding vector dimension; must match the
                dense_vector "dims" in the ES mapping
        """
        # NOTE(review): the host/port-dict constructor targets the
        # elasticsearch-py 7.x client; 8.x expects URL strings — confirm
        # which client version is pinned.
        self.es = Elasticsearch([{'host': es_host, 'port': es_port}])
        self.index_name = index_name
        self.embedding_model_type = embedding_model
        self.vector_dim = vector_dim
        self.embedding_model = None  # sentence-transformer model, if used
        self.word2vec_model = None   # gensim Word2Vec model, if used
        self.stopwords = self._load_stopwords()

        # Initialize the embedding model
        self._init_embedding_model()

        # Create the ES index if it does not exist yet
        self.create_index_if_not_exists()

    def _load_stopwords(self):
        """Load the stop-word set.

        Reads one stop word per line from 'stopwords.txt' when the file
        exists; otherwise falls back to a small built-in list of common
        Chinese function words.

        Returns:
            set of stop-word strings
        """
        try:
            with open('stopwords.txt', 'r', encoding='utf-8') as fh:
                return {line.strip() for line in fh}
        except FileNotFoundError:
            # No stop-word file on disk — use the minimal default list.
            return {'的', '了', '和', '是', '在', '我', '有', '不', '这', '那', '就', '都'}

    def _init_embedding_model(self):
        """Initialize the word/sentence embedding model.

        For 'word2vec': loads a previously saved model from
        'word2vec_model.bin' if present; otherwise defers training until
        questions are added.  For 'sentence-transformer': loads a
        pretrained multilingual model; on any failure the engine falls
        back to TF-IDF by switching embedding_model_type to 'tfidf'.
        """
        if self.embedding_model_type == 'word2vec':
            # Check whether a pretrained model exists on disk
            if os.path.exists('word2vec_model.bin'):
                self.word2vec_model = Word2Vec.load('word2vec_model.bin')
                print("加载预训练Word2Vec模型")
            else:
                print("未找到Word2Vec模型，将在添加问题时训练")
                self.word2vec_model = None

        elif self.embedding_model_type == 'sentence-transformer':
            # Use a pretrained multilingual sentence-vector model
            try:
                self.embedding_model = SentenceTransformer('distiluse-base-multilingual-cased')
                print("加载Sentence-Transformer模型")
            except Exception as e:
                print(f"加载Sentence-Transformer模型失败: {e}")
                print("将退回到TF-IDF向量化")
                # Fall back so get_embedding() takes the TF-IDF branch
                self.embedding_model_type = 'tfidf'

    def create_index_if_not_exists(self):
        """Create the Elasticsearch index if it does not already exist.

        The mapping stores the raw question text (IK-analyzed for
        Chinese, plus an exact-match keyword sub-field), a dense vector
        of self.vector_dim dims for similarity search, and
        category/tags/synonyms/cluster metadata.

        NOTE(review): "ik_smart" requires the IK analysis plugin on the
        cluster; also, the custom "my_analyzer" defined under settings is
        never referenced by any field in the mapping.
        """
        if not self.es.indices.exists(index=self.index_name):
            # Define the index mapping; text fields enable full-text search
            mapping = {
                "mappings": {
                    "properties": {
                        "question_id": {"type": "keyword"},
                        "question_text": {
                            "type": "text",
                            # Use the IK tokenizer for Chinese text
                            "analyzer": "ik_smart",
                            "search_analyzer": "ik_smart",
                            # Keep an exact-match keyword sub-field too
                            "fields": {
                                "keyword": {"type": "keyword", "ignore_above": 256}
                            }
                        },
                        "question_vector": {"type": "dense_vector", "dims": self.vector_dim},
                        "category": {"type": "keyword"},
                        "tags": {"type": "keyword"},
                        "synonyms": {"type": "text", "analyzer": "ik_smart"},
                        "cluster_id": {"type": "integer"}
                    }
                },
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0,
                    "analysis": {
                        "analyzer": {
                            "my_analyzer": {
                                "type": "custom",
                                "tokenizer": "standard",
                                "filter": ["lowercase", "stop"]
                            }
                        }
                    }
                }
            }
            self.es.indices.create(index=self.index_name, body=mapping)
            print(f"创建索引: {self.index_name}")

    def tokenize_text(self, text, is_chinese=True, remove_stopwords=True):
        """Tokenize *text*, using the jieba segmenter for Chinese input.

        Parameters:
            text: input text to tokenize
            is_chinese: if True, segment with jieba; otherwise split on
                whitespace
            remove_stopwords: drop tokens present in self.stopwords

        Returns:
            list of token strings
        """
        tokens = list(jieba.cut(text)) if is_chinese else text.split()
        if not remove_stopwords:
            return tokens
        return [tok for tok in tokens if tok not in self.stopwords]

    def expand_query(self, query_text, expansion_method='synonyms', top_n=3):
        """
        Query expansion — enrich the query with synonyms or related words.

        Parameters:
            query_text: original query text
            expansion_method: expansion method ('synonyms', 'word2vec', 'both')
            top_n: number of synonyms/related words to add per token

        Returns:
            expanded query text (space-joined, duplicates removed,
            original token order preserved)
        """
        tokens = self.tokenize_text(query_text)
        expanded_tokens = tokens.copy()

        if expansion_method in ['synonyms', 'both']:
            # Use the synonyms library to fetch Chinese synonyms
            for token in tokens:
                try:
                    # nearby() returns (words, similarity scores)
                    syns, scores = synonyms.nearby(token, top_n)
                    expanded_tokens.extend(syns)
                except Exception:
                    # Token unknown to the synonyms library — skip it.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    continue

        if expansion_method in ['word2vec', 'both'] and self.word2vec_model:
            # Use the word2vec model to fetch related words
            for token in tokens:
                try:
                    similar_words = [word for word, score in
                                     self.word2vec_model.wv.most_similar(token, topn=top_n)]
                    expanded_tokens.extend(similar_words)
                except Exception:
                    # Out-of-vocabulary token — skip it.
                    continue

        # Deduplicate while preserving order.  The original used
        # list(set(...)), which made the expanded query nondeterministic
        # across runs (set iteration order varies between processes).
        expanded_tokens = list(dict.fromkeys(expanded_tokens))
        return ' '.join(expanded_tokens)

    def get_embedding(self, text, texts=None):
        """
        Compute the embedding vector for *text*.

        Parameters:
            text: input text
            texts: corpus used to fit TF-IDF (optional; only used by the
                TF-IDF fallback branch)

        Returns:
            1-D embedding vector (numpy array or model-native vector)
        """
        if self.embedding_model_type == 'sentence-transformer' and self.embedding_model:
            # Sentence-transformer path: the model tokenizes internally
            return self.embedding_model.encode(text)

        elif self.embedding_model_type == 'word2vec' and self.word2vec_model:
            # Word2Vec path: average the word vectors of the tokens
            tokens = self.tokenize_text(text)
            word_vectors = []
            for token in tokens:
                try:
                    word_vectors.append(self.word2vec_model.wv[token])
                except KeyError:
                    # Out-of-vocabulary token — ignore it.
                    # (Was a bare `except:`; gensim raises KeyError here.)
                    continue

            if word_vectors:
                # Mean of the token vectors
                return np.mean(word_vectors, axis=0)
            # No token had a vector — return the zero vector
            return np.zeros(self.word2vec_model.vector_size)

        else:
            # TF-IDF fallback.  NOTE(review): the vectorizer is refit on
            # every call, so vectors from different calls are only
            # comparable when the same corpus is passed via `texts`.
            if texts is None:
                texts = [text]

            vectorizer = TfidfVectorizer(max_features=self.vector_dim)
            tfidf_matrix = vectorizer.fit_transform(texts).toarray()
            vec = tfidf_matrix[0]
            # Bug fix: with fewer than vector_dim distinct terms, TF-IDF
            # yields a vector shorter than the ES dense_vector mapping
            # (dims=vector_dim) accepts — pad with zeros to match.
            if vec.shape[0] < self.vector_dim:
                vec = np.pad(vec, (0, self.vector_dim - vec.shape[0]))
            return vec

    def train_word2vec(self, texts, vector_size=100, window=5, min_count=1, workers=4, epochs=10):
        """Train a Word2Vec model on *texts* and persist it to disk.

        Parameters:
            texts: list of raw text strings
            vector_size: embedding dimensionality
            window: context window size
            min_count: minimum token frequency to keep a word
            workers: number of training threads
            epochs: number of training passes
        """
        # Tokenize each text into a "sentence" (list of tokens)
        sentences = [self.tokenize_text(t) for t in texts]

        # Callback that reports progress after every epoch
        class EpochLogger(CallbackAny2Vec):
            def __init__(self):
                self.epoch = 0

            def on_epoch_end(self, model):
                self.epoch += 1
                print(f"Word2Vec训练轮数 {self.epoch}/{epochs}")

        # Fit the model
        self.word2vec_model = Word2Vec(
            sentences=sentences,
            vector_size=vector_size,
            window=window,
            min_count=min_count,
            workers=workers,
            epochs=epochs,
            callbacks=[EpochLogger()],
        )

        # Persist so _init_embedding_model() can reload it next run
        self.word2vec_model.save('word2vec_model.bin')
        print("Word2Vec模型训练完成并保存")

    def cluster_questions(self, questions_data, n_clusters=5):
        """
        Cluster questions with K-means and tag each with its cluster id.

        Parameters:
            questions_data: list of question dicts (each needs "question_text")
            n_clusters: desired number of clusters; clamped to the number
                of questions (KMeans raises when n_clusters > n_samples)

        Returns:
            the same list, with "cluster_id" (int) set on each dict
        """
        # Robustness fix: nothing to cluster — original crashed on []
        if not questions_data:
            return questions_data

        texts = [q["question_text"] for q in questions_data]

        # Embed every question (full corpus passed for the TF-IDF path)
        embeddings = np.array([self.get_embedding(text, texts) for text in texts])

        # Bug fix: KMeans requires n_samples >= n_clusters — clamp it so
        # small question sets (fewer than the default 5) do not raise.
        kmeans = KMeans(n_clusters=min(n_clusters, len(texts)), random_state=42)
        clusters = kmeans.fit_predict(embeddings)

        # Attach the cluster id to each question
        for i, q in enumerate(questions_data):
            q["cluster_id"] = int(clusters[i])

        return questions_data

    def get_synonyms_for_questions(self, questions_data):
        """
        Generate a synonym list for each question.

        Parameters:
            questions_data: list of question dicts (each needs "question_text")

        Returns:
            the same list, with a deduplicated "synonyms" list on each dict
        """
        for q in questions_data:
            # Tokenize the question text
            tokens = self.tokenize_text(q["question_text"])
            all_synonyms = []

            # Collect synonyms for every token
            for token in tokens:
                try:
                    # Top 3 synonyms per token
                    syns, _ = synonyms.nearby(token, 3)
                    all_synonyms.extend(syns)
                except Exception:
                    # Token unknown to the synonyms library — skip it.
                    # (Was a bare `except:`.)
                    continue

            # Deduplicate while preserving order — list(set(...)) produced
            # a nondeterministic ordering across runs.
            q["synonyms"] = list(dict.fromkeys(all_synonyms))

        return questions_data

    def add_standard_questions(self, questions_data, train_word2vec=True, cluster_questions=True):
        """
        Index the standard questions into Elasticsearch.

        Parameters:
            questions_data: list of question dicts with keys:
                            - question_id
                            - question_text
                            - category (optional)
                            - tags (optional, list)
            train_word2vec: whether to train a Word2Vec model first
                            (only applies when embedding_model_type == 'word2vec')
            cluster_questions: whether to cluster the questions before indexing
        """
        # Collect the raw texts for training / vectorization
        texts = [q["question_text"] for q in questions_data]

        # Train Word2Vec on the corpus if requested
        if self.embedding_model_type == 'word2vec' and train_word2vec:
            self.train_word2vec(texts)

        # Attach synonym lists to each question
        questions_data = self.get_synonyms_for_questions(questions_data)

        # Cluster the questions if requested
        if cluster_questions:
            questions_data = self.cluster_questions(questions_data)

        # Build the bulk-indexing actions
        actions = []
        for q in questions_data:
            # Embedding for this question (corpus passed for the TF-IDF path)
            vector = self.get_embedding(q["question_text"], texts).tolist()

            doc = {
                "question_id": q["question_id"],
                "question_text": q["question_text"],
                "question_vector": vector,
                "category": q.get("category", ""),
                "tags": q.get("tags", []),
                "synonyms": q.get("synonyms", []),
                "cluster_id": q.get("cluster_id", 0)
            }

            # _id = question_id, so re-adding a question overwrites it
            action = {
                "_index": self.index_name,
                "_id": q["question_id"],
                "_source": doc
            }
            actions.append(action)

        # Bulk-index; `failed` is the list of per-document errors
        success, failed = bulk(self.es, actions)
        print(f"已索引 {success} 个文档, {len(failed)} 个失败")

        # Refresh so the documents become searchable immediately
        self.es.indices.refresh(index=self.index_name)

    def search_similar_questions(self, query_text, top_k=5, min_score=0.5,
                                 expand_query=True, expand_method='both',
                                 search_method='hybrid'):
        """
        Search for similar questions using full-text and/or vector search.

        Parameters:
            query_text: the query question text
            top_k: number of top similar questions to return
            min_score: minimum similarity score threshold
            expand_query: whether to expand the query first
            expand_method: query-expansion method ('synonyms', 'word2vec', 'both')
            search_method: search method ('elasticsearch', 'vector', 'hybrid')

        Returns:
            list of similar-question dicts, each with a "score"

        NOTE(review): in 'hybrid' mode, ES BM25 scores (unbounded) and
        cosine similarities (0..1) are mixed in one candidate list, so
        `min_score` filters them on incompatible scales — confirm intended.
        """
        # Query expansion
        if expand_query:
            expanded_query = self.expand_query(query_text, expansion_method=expand_method)
            print(f"原始查询: {query_text}")
            print(f"扩展查询: {expanded_query}")
        else:
            expanded_query = query_text

        candidates = []

        # Method 1: Elasticsearch full-text search
        if search_method in ['elasticsearch', 'hybrid']:
            es_query = {
                "query": {
                    "bool": {
                        "should": [
                            {
                                "multi_match": {
                                    "query": expanded_query,
                                    "fields": ["question_text", "synonyms"],
                                    "fuzziness": "AUTO",
                                    "boost": 2.0
                                }
                            },
                            {
                                "match": {
                                    "question_text": {
                                        "query": query_text,
                                        "boost": 1.0
                                    }
                                }
                            }
                        ]
                    }
                },
                "size": top_k * 2  # over-fetch; post-filtering trims below
            }

            response = self.es.search(index=self.index_name, body=es_query)

            # Collect the full-text hits
            for hit in response["hits"]["hits"]:
                candidates.append({
                    "question_id": hit["_source"]["question_id"],
                    "question_text": hit["_source"]["question_text"],
                    "category": hit["_source"].get("category", ""),
                    "tags": hit["_source"].get("tags", []),
                    "cluster_id": hit["_source"].get("cluster_id", 0),
                    "score": hit["_score"],
                    "vector": hit["_source"]["question_vector"]
                })

        # Method 2: vector-similarity search
        if search_method in ['vector', 'hybrid'] and self.embedding_model_type != 'tfidf':
            # Fetch all questions
            es_query = {
                "query": {
                    "match_all": {}
                },
                "size": 1000  # large fixed cap; a real deployment should paginate
            }

            response = self.es.search(index=self.index_name, body=es_query)

            # Embed the query
            query_vector = self.get_embedding(query_text)

            # Compute cosine similarity against every stored document
            vector_candidates = []
            for hit in response["hits"]["hits"]:
                doc_vector = np.array(hit["_source"]["question_vector"])
                similarity = cosine_similarity([query_vector], [doc_vector])[0][0]

                vector_candidates.append({
                    "question_id": hit["_source"]["question_id"],
                    "question_text": hit["_source"]["question_text"],
                    "category": hit["_source"].get("category", ""),
                    "tags": hit["_source"].get("tags", []),
                    "cluster_id": hit["_source"].get("cluster_id", 0),
                    "score": float(similarity),
                    "vector": hit["_source"]["question_vector"]
                })

            # Sort by similarity, best first
            vector_candidates.sort(key=lambda x: x["score"], reverse=True)
            vector_candidates = vector_candidates[:top_k]

            # In hybrid mode, merge with the full-text results
            if search_method == 'hybrid':
                # Append vector hits that are not already present
                existing_ids = {c["question_id"] for c in candidates}
                for vc in vector_candidates:
                    if vc["question_id"] not in existing_ids:
                        candidates.append(vc)
                        existing_ids.add(vc["question_id"])
            else:
                candidates = vector_candidates

        # Cluster-based result boosting
        if candidates:
            # Count how often each cluster appears among the candidates
            cluster_counts = {}
            for c in candidates:
                cluster_id = c.get("cluster_id", 0)
                if cluster_id in cluster_counts:
                    cluster_counts[cluster_id] += 1
                else:
                    cluster_counts[cluster_id] = 1

            # Find the most common cluster id
            if cluster_counts:
                most_common_cluster = max(cluster_counts.items(), key=lambda x: x[1])[0]

                # Boost candidates belonging to the dominant cluster
                for c in candidates:
                    if c.get("cluster_id", 0) == most_common_cluster:
                        c["score"] *= 1.2  # +20% weight

            # Re-sort by (possibly boosted) score
            candidates.sort(key=lambda x: x["score"], reverse=True)

            # Drop candidates below the minimum score
            candidates = [c for c in candidates if c["score"] >= min_score]

            # Keep only the top_k results
            candidates = candidates[:top_k]

        return candidates

    def delete_index(self):
        """Delete the Elasticsearch index if it exists."""
        if not self.es.indices.exists(index=self.index_name):
            return
        self.es.indices.delete(index=self.index_name)
        print(f"已删除索引: {self.index_name}")


# Usage example
if __name__ == "__main__":
    # Initialize the search engine with the sentence-transformer backend
    search_engine = QuestionSimilaritySearch(embedding_model="sentence-transformer")

    # Sample standard questions
    standard_questions = [
        {
            "question_id": "q1",
            "question_text": "如何重置我的账户密码？",
            "category": "account",
            "tags": ["password", "reset"]
        },
        {
            "question_id": "q2",
            "question_text": "账户密码忘记了怎么办？",
            "category": "account",
            "tags": ["password", "forgot"]
        },
        {
            "question_id": "q3",
            "question_text": "如何更改我的登录密码？",
            "category": "account",
            "tags": ["password", "change"]
        },
        {
            "question_id": "q4",
            "question_text": "忘记密码后如何找回？",
            "category": "account",
            "tags": ["password", "recovery"]
        },
        {
            "question_id": "q5",
            "question_text": "我想修改我的密码，怎么操作？",
            "category": "account",
            "tags": ["password", "modify"]
        },
        {
            "question_id": "q6",
            "question_text": "订单退款需要多长时间？",
            "category": "payment",
            "tags": ["refund", "order", "time"]
        },
        {
            "question_id": "q7",
            "question_text": "我的退款什么时候能到账？",
            "category": "payment",
            "tags": ["refund", "timing"]
        },
        {
            "question_id": "q8",
            "question_text": "退款流程需要走哪些步骤？",
            "category": "payment",
            "tags": ["refund", "process"]
        },
        {
            "question_id": "q9",
            "question_text": "如何查询我的退款状态？",
            "category": "payment",
            "tags": ["refund", "status", "check"]
        },
        {
            "question_id": "q10",
            "question_text": "取消订单后钱会自动退回吗？",
            "category": "payment",
            "tags": ["refund", "cancel", "automatic"]
        }
    ]

    # Index the questions
    search_engine.add_standard_questions(standard_questions)

    # Exercise query expansion + hybrid search
    query = "我的密码忘了，怎么重新设置"
    similar_questions = search_engine.search_similar_questions(
        query,
        top_k=5,
        expand_query=True,
        expand_method='both',
        search_method='hybrid'
    )

    print(f"查询: {query}")
    print("相似问题:")
    for i, q in enumerate(similar_questions):
        print(f"{i + 1}. {q['question_text']} (分数: {q['score']:.4f}, 聚类: {q['cluster_id']})")