from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


class QuestionSimilaritySearch:
    """Similarity search over a catalog of standard questions.

    Two-stage retrieval:
      1. Elasticsearch full-text relevance (``multi_match`` with fuzziness)
         selects candidate questions.
      2. TF-IDF cosine similarity — fitted per call on the query plus the
         candidates — re-ranks those candidates.

    NOTE(review): the TF-IDF vectors stored at index time and the vectors
    computed at search time come from *different* fitted vectorizers, so the
    stored ``question_vector`` field is not directly comparable to query-time
    vectors; it is returned to callers but never used for scoring here.
    """

    # Dimensionality of the dense_vector field in the index mapping and of
    # the padded TF-IDF vectors stored per question.
    VECTOR_DIMS = 100

    def __init__(self,
                 es_host="172.16.226.131",
                 es_port=9200,
                 index_name="standard_questions",
                 username="elastic",
                 password="WlkO3gkQsjUvIPhamhqc"):
        """Initialize the similarity search engine and ensure the index exists.

        Args:
            es_host: Elasticsearch host.
            es_port: Elasticsearch port.
            index_name: Name of the index that stores the standard questions.
            username: Basic-auth user name.
            password: Basic-auth password.

        SECURITY NOTE(review): default credentials are hard-coded in source;
        they should be supplied via configuration/environment instead.
        """
        # NOTE(review): the host-dict + http_auth style matches the 7.x
        # elasticsearch-py client; 8.x expects full URLs and ``basic_auth`` —
        # confirm the installed client version.
        self.es = Elasticsearch([{'host': es_host, 'port': es_port}], http_auth=(username, password))
        self.index_name = index_name
        self.create_index_if_not_exists()

    def create_index_if_not_exists(self):
        """Create the Elasticsearch index with its mapping if it is absent."""
        if not self.es.indices.exists(index=self.index_name):
            # Text field uses the IK analyzer for Chinese tokenization and
            # keeps a raw ``keyword`` sub-field for exact matching.
            mapping = {
                "mappings": {
                    "properties": {
                        "question_id": {"type": "keyword"},
                        "question_text": {
                            "type": "text",
                            # IK smart analyzer for Chinese text (requires the
                            # analysis-ik plugin to be installed on the cluster).
                            "analyzer": "ik_smart",
                            "search_analyzer": "ik_smart",
                            # Keep the raw text for exact/keyword queries.
                            "fields": {
                                "keyword": {"type": "keyword", "ignore_above": 256}
                            }
                        },
                        "question_vector": {"type": "dense_vector", "dims": self.VECTOR_DIMS},
                        "category": {"type": "keyword"},
                        "tags": {"type": "keyword"}
                    }
                },
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0,
                    # NOTE(review): "my_analyzer" is defined but not referenced
                    # by any field above — kept for backward compatibility.
                    "analysis": {
                        "analyzer": {
                            "my_analyzer": {
                                "type": "custom",
                                "tokenizer": "standard",
                                "filter": ["lowercase", "stop"]
                            }
                        }
                    }
                }
            }
            self.es.indices.create(index=self.index_name, body=mapping)
            print(f"创建索引: {self.index_name}")

    def tokenize_text(self, text, is_chinese=True):
        """Tokenize *text*; jieba for Chinese, whitespace split otherwise.

        Args:
            text: Input text to tokenize.
            is_chinese: Whether the text is Chinese (default True).

        Returns:
            List of token strings.
        """
        if is_chinese:
            return list(jieba.cut(text))
        return text.split()

    @staticmethod
    def _pad_vector(vector, dim=100):
        """Zero-pad (or truncate) *vector* to exactly *dim* entries.

        Guarantees the stored vector matches the index's dense_vector dims
        even when the fitted vocabulary is smaller than ``max_features``.
        """
        if len(vector) < dim:
            return vector + [0.0] * (dim - len(vector))
        return vector[:dim]

    def add_standard_questions(self, questions_data):
        """Bulk-index standard questions into Elasticsearch.

        Args:
            questions_data: List of dicts with keys:
                            - question_id
                            - question_text
                            - category (optional)
                            - tags (optional, list)
        """
        # Guard: TfidfVectorizer raises ValueError on an empty corpus.
        if not questions_data:
            return

        # Compute TF-IDF vectors for all questions at once.
        texts = [q["question_text"] for q in questions_data]
        tokenized_texts = [" ".join(self.tokenize_text(text)) for text in texts]

        vectorizer = TfidfVectorizer(max_features=self.VECTOR_DIMS)
        tfidf_matrix = vectorizer.fit_transform(tokenized_texts).toarray()

        # Build the bulk actions, padding each vector to VECTOR_DIMS.
        actions = []
        for i, q in enumerate(questions_data):
            doc = {
                "question_id": q["question_id"],
                "question_text": q["question_text"],
                "question_vector": self._pad_vector(tfidf_matrix[i].tolist(), dim=self.VECTOR_DIMS),
                "category": q.get("category", ""),
                "tags": q.get("tags", [])
            }
            actions.append({
                "_index": self.index_name,
                # Use question_id as the document id so re-indexing upserts.
                "_id": q["question_id"],
                "_source": doc
            })

        # Bulk-index; with the default raise_on_error, ``failed`` is a list.
        success, failed = bulk(self.es, actions)
        print(f"已索引 {success} 个文档, {len(failed)} 个失败")

        # Refresh so the documents are searchable immediately.
        self.es.indices.refresh(index=self.index_name)

    def search_similar_questions(self, query_text, top_k=5, min_score=0.1):
        """Search for questions similar to *query_text*.

        Args:
            query_text: The query question text.
            top_k: Number of top similar questions to return.
            min_score: Minimum cosine-similarity threshold for results.

        Returns:
            List of candidate dicts (question_id, question_text, category,
            tags, score, vector, cosine_similarity), sorted by descending
            cosine similarity.
        """
        # Stage 1: Elasticsearch text relevance with fuzzy matching.
        es_query = {
            "query": {
                "multi_match": {
                    "query": query_text,
                    "fields": ["question_text"],
                    "fuzziness": "AUTO"
                }
            },
            "size": top_k * 2  # over-fetch to leave room for re-rank filtering
        }

        response = self.es.search(index=self.index_name, body=es_query)

        # Collect candidate hits.
        candidates = []
        for hit in response["hits"]["hits"]:
            candidates.append({
                "question_id": hit["_source"]["question_id"],
                "question_text": hit["_source"]["question_text"],
                "category": hit["_source"].get("category", ""),
                "tags": hit["_source"].get("tags", []),
                "score": hit["_score"],
                # Robustness: tolerate docs indexed without a vector field.
                "vector": hit["_source"].get("question_vector", [])
            })

        # Stage 2: re-rank by TF-IDF cosine similarity computed on a corpus
        # of [query] + candidates (fitted fresh for this call).
        if candidates:
            query_tokens = " ".join(self.tokenize_text(query_text))
            vectorizer = TfidfVectorizer(max_features=self.VECTOR_DIMS)

            corpus = [" ".join(self.tokenize_text(c["question_text"])) for c in candidates]
            corpus.insert(0, query_tokens)  # query occupies row 0

            tfidf_matrix = vectorizer.fit_transform(corpus).toarray()
            query_vector = tfidf_matrix[0]

            # Cosine similarity between the query and each candidate row.
            for i, candidate in enumerate(candidates):
                similarity = cosine_similarity([query_vector], [tfidf_matrix[i + 1]])[0][0]
                candidate["cosine_similarity"] = similarity

            # Sort best-first, drop weak matches, cap at top_k.
            candidates.sort(key=lambda x: x["cosine_similarity"], reverse=True)
            candidates = [c for c in candidates if c["cosine_similarity"] >= min_score]
            candidates = candidates[:top_k]

        return candidates

    def delete_index(self):
        """Delete the Elasticsearch index if it exists."""
        if self.es.indices.exists(index=self.index_name):
            self.es.indices.delete(index=self.index_name)
            print(f"已删除索引: {self.index_name}")


# Usage example
if __name__ == "__main__":
    # Build the engine (connects to Elasticsearch on construction).
    engine = QuestionSimilaritySearch()

    # A small catalog of canonical (standard) questions.
    catalog = [
        {
            "question_id": "q1",
            "question_text": "如何重置我的账户密码？",
            "category": "account",
            "tags": ["password", "reset"],
        },
        {
            "question_id": "q2",
            "question_text": "账户密码忘记了怎么办？",
            "category": "account",
            "tags": ["password", "forgot"],
        },
        {
            "question_id": "q3",
            "question_text": "如何更改我的登录密码？",
            "category": "account",
            "tags": ["password", "change"],
        },
        {
            "question_id": "q4",
            "question_text": "订单退款需要多长时间？",
            "category": "payment",
            "tags": ["refund", "order"],
        },
        {
            "question_id": "q5",
            "question_text": "我的退款什么时候能到账？",
            "category": "payment",
            "tags": ["refund", "timing"],
        },
    ]

    # Index the catalog (left disabled so repeated runs don't re-index).
    # engine.add_standard_questions(catalog)

    # Look up questions similar to a free-form user query.
    user_query = "我的密码忘了，怎么重新设置"
    matches = engine.search_similar_questions(user_query, top_k=3)

    print(f"查询: {user_query}")
    print("相似问题:")
    for rank, match in enumerate(matches, start=1):
        print(f"{rank}. {match['question_text']} (分数: {match['cosine_similarity']:.4f})")
