import re
# SequenceMatcher is used to compute the similarity of two strings
from difflib import SequenceMatcher
from .tokenizers import MultiLanguageTokenizer

# Feature-extraction function: extract_features
def extract_features(text, lang='zh', tokenizer=None):
    """Extract anchor features from a sentence for alignment scoring.

    Args:
        text: raw sentence string.
        lang: language code forwarded to the tokenizer.
        tokenizer: optional object exposing tokenize(text, lang); when
            absent (or failing), word count falls back to a whitespace split.

    Returns:
        (cleaned_text, feature_dict) where feature_dict carries punctuation
        flags, the number of digit runs, cleaned length and a word count.
    """
    # Keep word characters, whitespace, CJK, Thai, Latin-extended letters
    # and common (ASCII + full-width) punctuation; everything else becomes
    # a space, then whitespace runs collapse to single spaces.
    normalized = re.sub(r'[^\w\s\u4e00-\u9fff\u0e00-\u0e7f\u0100-\u017f.,!?;:，。？！；：]', ' ', text)
    normalized = re.sub(r'\s+', ' ', normalized).strip()

    # Anchor features: question/exclamation marks (ASCII or full-width
    # forms), count of digit runs in the raw text, cleaned-text length.
    features = {
        'question': ('?' in text) or ('？' in text),
        'exclamation': ('!' in text) or ('！' in text),
        'digit_count': len(re.findall(r'\d+', text)),
        'length': len(normalized),
    }

    # Word-level count: prefer the tokenizer, fall back to a plain split
    # on any tokenizer error (best-effort by design).
    if tokenizer:
        try:
            features['word_count'] = len(tokenizer.tokenize(text, lang))
        except Exception as e:
            print(f"分词失败 ({lang}): {e}")
            features['word_count'] = len(normalized.split())
    else:
        features['word_count'] = len(normalized.split())

    return normalized, features

# Rule-based alignment class: RuleBasedAligner
class RuleBasedAligner:
    """Rule-based sentence aligner.

    Pairs sentences from two monolingual lists using anchor features
    (punctuation, digit runs, length, word count) combined with fuzzy
    character-level similarity, with a dedicated two-stage strategy for
    the Chinese-Thai language pair.
    """

    def __init__(self, min_length_ratio=0.3, max_length_ratio=5.0, lang1='zh', lang2='vi'):
        """Configure the aligner for a language pair.

        Args:
            min_length_ratio: lower bound on the min/max length ratio of a
                candidate pair (may be overridden per language pair).
            max_length_ratio: stored but currently not consulted by
                _length_compatible; kept for interface compatibility.
            lang1: language code of the first sentence list.
            lang2: language code of the second sentence list.
        """
        self.min_length_ratio = min_length_ratio
        self.max_length_ratio = max_length_ratio
        self.lang1 = lang1
        self.lang2 = lang2
        # Multi-language tokenizer used for word-level features.
        self.tokenizer = MultiLanguageTokenizer()

        # Scoring weights differ per language pair.
        self._setup_language_weights()

    def _is_zh_th_pair(self):
        """Return True when the pair is Chinese-Thai, in either direction."""
        return {self.lang1, self.lang2} == {'zh', 'th'}

    def _setup_language_weights(self):
        """Set scoring weights and thresholds for the configured language pair."""
        if self._is_zh_th_pair():
            # Chinese-Thai scripts share almost no characters, so lean
            # heavily on features, raise the acceptance threshold, and
            # relax the length constraints.
            self.feature_weight = 0.90
            self.text_weight = 0.10
            self.similarity_threshold = 0.70
            self.min_length_ratio = 0.15
            self.max_length_ratio = 10.0
        elif (self.lang1 == 'zh' and self.lang2 == 'vi') or (self.lang1 == 'vi' and self.lang2 == 'zh'):
            # Chinese-Vietnamese (original configuration).
            self.feature_weight = 0.7
            self.text_weight = 0.3
            self.similarity_threshold = 0.4
        else:
            # Default configuration for any other language pair.
            self.feature_weight = 0.7
            self.text_weight = 0.3
            self.similarity_threshold = 0.4

    def _length_compatible(self, len1, len2):
        """Return True when the two lengths fall within the allowed ratio.

        NOTE(review): only the min/max lower bound is enforced here —
        max_length_ratio is never consulted. Confirm whether an upper
        bound was intended.
        """
        if len1 == 0 or len2 == 0:
            return False
        return min(len1, len2) / max(len1, len2) >= self.min_length_ratio

    def _feature_match_score(self, feat1, feat2):
        """Score feature agreement between two sentences, clamped to [0, 1]."""
        score = 0.0

        # Punctuation anchors; question marks carry the most weight.
        if feat1['question'] == feat2['question']:
            score += 0.25
        if feat1['exclamation'] == feat2['exclamation']:
            score += 0.20

        # Digit-run counts: an exact match on sentences that actually
        # contain digits is strong evidence of alignment.
        if feat1['digit_count'] == feat2['digit_count']:
            score += 0.35 if feat1['digit_count'] > 0 else 0.05

        # Word-count ratio (epsilon avoids division by zero).
        word_ratio = min(feat1['word_count'], feat2['word_count']) / (max(feat1['word_count'], feat2['word_count']) + 1e-5)
        score += 0.30 * word_ratio

        # Character-length ratio, deliberately low weight.
        len_ratio = min(feat1['length'], feat2['length']) / (max(feat1['length'], feat2['length']) + 1e-5)
        score += 0.05 * len_ratio

        # Chinese-Thai: add a sentence-structure bonus.
        if self._is_zh_th_pair():
            score += self._calculate_structure_score(feat1, feat2) * 0.20

        return min(score, 1.0)

    def _calculate_structure_score(self, feat1, feat2):
        """Structural match score for Chinese-Thai sentence pairs."""
        score = 0.0

        # Short sentences (greetings, polite phrases) usually map to
        # short counterparts.
        if feat1['length'] <= 5 and feat2['length'] <= 15:
            score += 0.3
        elif feat1['length'] <= 10 and feat2['length'] <= 30:
            score += 0.2

        # Word-count compatibility.
        # BUG FIX: the original tested >=4/>=3 before >=6/>=5, which made
        # the higher-scoring 0.4 branch unreachable; test the stricter
        # threshold first.
        if feat1['word_count'] >= 6 and feat2['word_count'] >= 5:
            score += 0.4
        elif feat1['word_count'] >= 4 and feat2['word_count'] >= 3:
            score += 0.2

        # Matching questions get an extra bonus.
        if feat1['question'] and feat2['question']:
            score += 0.3

        return score

    def _text_similarity(self, s1, s2):
        """Fuzzy character-level similarity via difflib's SequenceMatcher."""
        return SequenceMatcher(None, s1, s2).ratio()

    def align(self, raw_sents1, raw_sents2):
        """Align two lists of raw (uncleaned) sentences.

        Returns:
            A list of dicts: {'index1', 'index2', 'similarity'}.
        """
        # Extract per-sentence features with the language-aware tokenizer.
        items1 = [extract_features(s, self.lang1, self.tokenizer) for s in raw_sents1]
        items2 = [extract_features(s, self.lang2, self.tokenizer) for s in raw_sents2]

        # Chinese-Thai uses a dedicated two-stage strategy.
        # BUG FIX: the original passed raw_sents1/raw_sents2 as extra
        # positional args to _zh_th_align(items1, items2), raising a
        # TypeError on every zh-th call.
        if self._is_zh_th_pair():
            return self._zh_th_align(items1, items2)

        aligned = []    # collected alignment records
        used_j = set()  # indices of the second list already consumed

        # Greedy 1:1 matching: each source sentence takes its best-scoring
        # unused target, if the score clears the threshold.
        for i, (clean1, feat1) in enumerate(items1):
            best_j, best_score = -1, 0.0
            for j, (clean2, feat2) in enumerate(items2):
                if j in used_j:
                    continue
                # Skip pairs whose lengths are incompatible.
                if not self._length_compatible(feat1['length'], feat2['length']):
                    continue

                # Weighted sum of feature agreement and text similarity.
                feature_score = self._feature_match_score(feat1, feat2)
                text_score = self._text_similarity(clean1, clean2)
                total_score = self.feature_weight * feature_score + self.text_weight * text_score

                if total_score > best_score:
                    best_score = total_score
                    best_j = j

            if best_j != -1 and best_score >= self.similarity_threshold:
                aligned.append({
                    'index1': i,
                    'index2': best_j,
                    'similarity': round(best_score, 4)
                })
                used_j.add(best_j)

        return aligned

    def _zh_th_align(self, items1, items2):
        """Two-stage Chinese-Thai alignment over pre-extracted features.

        Stage 1: accept high-confidence same-index (positional) matches.
        Stage 2: greedily match the remainder by descending score.
        """
        aligned = []
        used_i = set()  # consumed Chinese-side indices
        used_j = set()  # consumed Thai-side indices

        # Stage 1: positional matching (files assumed mostly in order);
        # only high-confidence hits are accepted here.
        for i, j, score in self._position_based_match(items1, items2):
            if score >= 0.75:
                aligned.append({
                    'index1': i,
                    'index2': j,
                    'similarity': round(score, 4)
                })
                used_i.add(i)
                used_j.add(j)

        # Stage 2: score every remaining cross pair above the threshold.
        match_scores = []
        for i, (clean1, feat1) in enumerate(items1):
            if i in used_i:
                continue
            for j, (clean2, feat2) in enumerate(items2):
                if j in used_j:
                    continue
                if not self._length_compatible(feat1['length'], feat2['length']):
                    continue

                feature_score = self._feature_match_score(feat1, feat2)
                text_score = self._text_similarity(clean1, clean2)
                total_score = self.feature_weight * feature_score + self.text_weight * text_score

                if total_score >= self.similarity_threshold:
                    match_scores.append((i, j, total_score))

        # Greedy selection: best scores first, each index used once.
        match_scores.sort(key=lambda x: x[2], reverse=True)
        for i, j, score in match_scores:
            if i not in used_i and j not in used_j:
                aligned.append({
                    'index1': i,
                    'index2': j,
                    'similarity': round(score, 4)
                })
                used_i.add(i)
                used_j.add(j)

        return aligned

    def _position_based_match(self, items1, items2):
        """Score same-index pairs (files assumed mostly in order).

        Returns (i, i, score) tuples that pass a loose pre-filter;
        callers apply their own confidence threshold.
        """
        matches = []

        for i in range(min(len(items1), len(items2))):
            clean1, feat1 = items1[i]
            clean2, feat2 = items2[i]

            # Basic compatibility check.
            if not self._length_compatible(feat1['length'], feat2['length']):
                continue

            feature_score = self._feature_match_score(feat1, feat2)
            text_score = self._text_similarity(clean1, clean2)
            total_score = self.feature_weight * feature_score + self.text_weight * text_score

            # Same-index pairs sit at position distance zero, so the
            # position bonus is always the full 0.2 (the original computed
            # abs(i - i), which is identically 0).
            total_score += 0.2

            if total_score >= 0.6:  # loose threshold for pre-screening
                matches.append((i, i, total_score))

        return matches

    def _high_confidence_match(self, items1, items2):
        """Detect obviously-paired sentences (greetings, questions, digits).

        Returns (i, j, confidence) tuples; each index is used at most once.
        """
        matches = []
        # BUG FIX: track consumed target indices so one j cannot be paired
        # with several i (the original only tracked i, which enumerate
        # never repeats anyway).
        used_j = set()

        for i, (clean1, feat1) in enumerate(items1):
            best_j, best_confidence = -1, 0.0

            for j, (clean2, feat2) in enumerate(items2):
                if j in used_j:
                    continue
                confidence = 0.0

                # Greeting match: both sentences short and recognised
                # greeting phrases.
                if feat1['length'] <= 5 and feat2['length'] <= 20:
                    if self._is_greeting(clean1, self.lang1) and self._is_greeting(clean2, self.lang2):
                        confidence = 0.95

                # Question match with a compatible word-count ratio.
                elif feat1['question'] and feat2['question']:
                    denom = max(feat1['word_count'], feat2['word_count'])
                    # BUG FIX: guard against both word counts being zero.
                    if denom > 0 and min(feat1['word_count'], feat2['word_count']) / denom >= 0.5:
                        confidence = 0.85

                # Digit match on sentences that contain digits.
                elif feat1['digit_count'] > 0 and feat1['digit_count'] == feat2['digit_count']:
                    confidence = 0.90

                # Exclamation match with a compatible word-count ratio.
                elif feat1['exclamation'] and feat2['exclamation']:
                    denom = max(feat1['word_count'], feat2['word_count'])
                    if denom > 0 and min(feat1['word_count'], feat2['word_count']) / denom >= 0.4:
                        confidence = 0.80

                if confidence > best_confidence:
                    best_confidence = confidence
                    best_j = j

            if best_confidence >= 0.80 and best_j != -1:
                matches.append((i, best_j, best_confidence))
                used_j.add(best_j)

        return matches

    def _is_greeting(self, text, lang):
        """Return True when text matches a known greeting for lang."""
        greetings = {
            'zh': ['你好', '您好', '早上好', '下午好', '晚上好', '再见', '谢谢', '感谢'],
            'th': ['สวัสดี', 'สวัสดีครับ', 'สวัสดีค่ะ', 'ขอบคุณ', 'ขอบคุณครับ', 'ขอบคุณค่ะ', 'ลาก่อน', 'สวัสดีตอนเช้า']
        }

        # Substring match in either direction catches attached particles
        # (e.g. Thai polite endings).
        for greeting in greetings.get(lang, ()):
            if greeting in text or text in greeting:
                return True
        return False