"""
需要用到向量模型了
"""
import difflib
import numpy as np
from nl2sql.similarity.base import SimilarityBase
from nl2sql.model.embedding_model import EmbeddingModel
from typing import *


class CosineSimilarity(SimilarityBase):
    """
    Cosine 相似度算法实现，基于嵌入模型
    """

    def __init__(self, embedding_model: EmbeddingModel):
        """
        初始化 CosineSimilarity 类
        :param embedding_model: 用于将文本转换为向量的嵌入模型
        """
        self.embedding_model = embedding_model

    def get_similarity_score(self, str1: str, str2: str, alpha: float = 0.8) -> float:
        """
        计算两个字符串的混合相似度分数（智能语义版：强嵌入向量+字符相似度融合）

        :param str1: 第一个字符串
        :param str2: 第二个字符串
        :param alpha: 向量相似度权重，字符相似度占(1-alpha)
        :return: 最终融合相似度分数
        """

        # 1. 获取强语义的向量
        vec1 = self.embedding_model.get_text_embedding(str1)
        vec2 = self.embedding_model.get_text_embedding(str2)

        # 2. 计算余弦相似度
        dot_product = np.dot(vec1, vec2)
        norm_vec1 = np.linalg.norm(vec1)
        norm_vec2 = np.linalg.norm(vec2)

        if norm_vec1 == 0 or norm_vec2 == 0:
            embedding_sim = 0.0
        else:
            embedding_sim = dot_product / (norm_vec1 * norm_vec2)

        # 3. 计算字符级别相似度
        char_sim = difflib.SequenceMatcher(None, str1, str2).ratio()

        # 4. 智能动态融合
        if embedding_sim >= 0.85:
            # 高置信度，主要靠向量
            final_score = embedding_sim
        elif 0.6 <= embedding_sim < 0.85:
            # 中等置信度，融合字面
            final_score = alpha * embedding_sim + (1 - alpha) * char_sim
        else:
            # 向量相似度太低，直接放弃（可以设个低分）
            final_score = 0.0

        return final_score

    def find_best_match(self,
                        target: Union[str, List[str]],  # 支持字符串或字符串列表
                        candidates: Union[List[str], List[np.ndarray]],
                        top_n: Optional[int] = 1) -> List[Tuple[int, float]]:
        """
        从候选列表中找到与目标字符串最相似的 top_n 个匹配项，返回索引和相似度分数

        :param target: 目标字符串或目标字符串列表
        :param candidates: 候选列表，可以是字符串（List[str]）或向量（List[np.ndarray]）
        :param top_n: 返回的最相似项数量，默认为 1
        :return: List[Tuple[int, float]] - 包含 (索引, 相似度分数) 的列表，按相似度从高到低排序
        """
        if not candidates or top_n <= 0:
            return []

        # 如果 target 是字符串列表，处理每个目标字符串
        if isinstance(target, list):
            all_best_matches = []
            for t in target:
                target_vec = self.embedding_model.get_text_embedding(t)
                target_norm = np.linalg.norm(target_vec)

                # 模为0
                if target_norm == 0:
                    continue

                if isinstance(candidates[0], str):
                    candidate_vecs = np.array([self.embedding_model.get_text_embedding(c) for c in candidates])
                else:
                    candidate_vecs = np.array(candidates)

                candidate_norms = np.linalg.norm(candidate_vecs, axis=1)
                dot_products = np.dot(candidate_vecs, target_vec)

                with np.errstate(divide='ignore', invalid='ignore'):
                    similarities = dot_products / (candidate_norms * target_norm)
                    similarities[candidate_norms == 0] = 0.0

                top_indices = np.argsort(similarities)[::-1][:top_n]
                all_best_matches.extend([(idx, similarities[idx]) for idx in top_indices])

            return sorted(all_best_matches, key=lambda x: x[1], reverse=True)[:top_n]

        # 如果 target 是单个字符串，直接处理
        target_vec = self.embedding_model.get_text_embedding(target)
        target_norm = np.linalg.norm(target_vec)

        # 模为0
        if target_norm == 0:
            return []

        if isinstance(candidates[0], str):
            candidate_vecs = np.array([self.embedding_model.get_text_embedding(c) for c in candidates])
        else:
            candidate_vecs = np.array(candidates)

        candidate_norms = np.linalg.norm(candidate_vecs, axis=1)
        dot_products = np.dot(candidate_vecs, target_vec)

        with np.errstate(divide='ignore', invalid='ignore'):
            similarities = dot_products / (candidate_norms * target_norm)
            similarities[candidate_norms == 0] = 0.0

        top_indices = np.argsort(similarities)[::-1][:top_n]
        return [(idx, similarities[idx]) for idx in top_indices]


if __name__ == '__main__':
    # BUGFIX: the original called CosineSimilarity() with no arguments,
    # which always raised TypeError (embedding_model is required).
    # NOTE(review): assumes EmbeddingModel() is constructible with no
    # arguments — confirm against nl2sql.model.embedding_model.
    csm = CosineSimilarity(EmbeddingModel())
    print(csm.get_similarity_score("hello", "hello"))