import WordMetrics
import numpy as np
from string import punctuation
from dtwalign import dtw_from_distance_matrix
import time
from typing import List, Tuple

WORD_NOT_FOUND_TOKEN = '-'  # placeholder for a reference word with no matching transcribed word


class WordMatcher:
    """Aligns ASR-transcribed words against reference-text words.

    Part of PronunciationTrainer: for every word in the reference text it
    finds the best-matching word (if any) in the recognized transcript.
    Alignment runs DTW over an edit-distance matrix, with an LCS-based
    fallback when DTW fails.
    """

    def __init__(self, language: str):
        """Initialize the matcher for the given language code."""
        self.language = language
        # offset_blank = 1 appends one extra "blank" row to the distance
        # matrix, giving DTW the option to skip a real word (cost = its length).
        self.offset_blank = 1
        print(f"[WordMatcher] Initialized for language: {language}")

    def get_match_result(self, words_estimated: List[str], words_real: List[str]) -> Tuple[List[str], List[int]]:
        """Run word alignment and return the match result.

        Thin timing wrapper around :meth:`_get_best_mapped_words`.

        Returns:
            (mapped_words, mapped_indices): for each real word, the matched
            estimated word (or WORD_NOT_FOUND_TOKEN) and its index (or -1).
        """
        start_time = time.time()

        mapped_words, mapped_indices = self._get_best_mapped_words(
            words_estimated, words_real, use_dtw=True
        )

        end_time = time.time()
        print(f"匹配转录文本耗时: {(end_time - start_time):.4f}s")

        return mapped_words, mapped_indices

    def _get_word_distance_matrix(self, words_estimated: List[str], words_real: List[str]) -> np.ndarray:
        """Build the edit-distance cost matrix between the two word lists.

        Returns an array of shape (len(words_estimated) + offset_blank,
        len(words_real)). With offset_blank == 1 the final row holds the
        cost of skipping each real word (its punctuation-free length).
        """
        words_estimated = words_estimated if words_estimated is not None else []
        words_real = words_real if words_real is not None else []

        # Hoist punctuation stripping / lowercasing out of the O(n*m) loop —
        # the original recomputed both for every cell.
        estimated_clean = [self._remove_punctuation(w).lower() for w in words_estimated]
        real_clean = [self._remove_punctuation(w).lower() for w in words_real]

        matrix_rows = len(estimated_clean) + self.offset_blank
        matrix_cols = len(real_clean)
        word_distance_matrix = np.full((matrix_rows, matrix_cols), np.inf)

        for idx_estimated, est_word in enumerate(estimated_clean):
            for idx_real, real_word in enumerate(real_clean):
                word_distance_matrix[idx_estimated, idx_real] = WordMetrics.edit_distance_python(
                    est_word, real_word
                )

        if self.offset_blank == 1:
            # Blank row: mapping a real word to "nothing" costs deleting all
            # its letters (lowercasing does not change the length).
            for idx_real, real_word in enumerate(real_clean):
                word_distance_matrix[len(estimated_clean), idx_real] = len(real_word)

        return word_distance_matrix

    def _remove_punctuation(self, word: str) -> str:
        """Return *word* with all ASCII punctuation characters removed."""
        return ''.join([char for char in word if char not in punctuation])

    @staticmethod
    def _is_valid_dtw_path(path) -> bool:
        """Return True if *path* looks like a sequence of (i, j) index pairs.

        Shared by path construction and DTW invocation; only the first
        entry is inspected, matching the original duplicated checks.
        """
        if not isinstance(path, (list, tuple, np.ndarray)):
            return False
        if len(path) > 0:
            first = path[0]
            if not (isinstance(first, (list, tuple, np.ndarray)) and len(first) == 2):
                return False
        return True

    def _get_resulting_string_from_dtw_path(self, dtw_alignment_path: List[Tuple[int, int]],
                                           words_estimated: List[str],
                                           words_real: List[str]) -> Tuple[List[str], List[int]]:
        """Construct mapped words and their source indices from a DTW path.

        Path entries are unpacked as (real_idx, est_idx) — the matrix handed
        to DTW is transposed to (real x estimated). An est_idx beyond
        len(words_estimated) is the "blank" row and leaves the real word
        unmatched. Only the first match per real word is kept.
        """
        mapped_words = [WORD_NOT_FOUND_TOKEN] * len(words_real)
        mapped_words_indices = [-1] * len(words_real)

        if not self._is_valid_dtw_path(dtw_alignment_path):
            print("[ERROR] dtw_alignment_path has an abnormal format. Marking all as omitted.")
            return mapped_words, mapped_words_indices

        for real_idx, est_idx in dtw_alignment_path:
            if real_idx < len(words_real) and est_idx < len(words_estimated):
                if mapped_words[real_idx] == WORD_NOT_FOUND_TOKEN:
                    mapped_words[real_idx] = words_estimated[est_idx]
                    mapped_words_indices[real_idx] = est_idx

        return mapped_words, mapped_words_indices

    def _lcs_align(self, words_estimated: List[str], words_real: List[str]) -> Tuple[List[str], List[int]]:
        """Fallback alignment: longest common subsequence of cleaned words."""
        # Clean each word exactly once (the original re-cleaned inside the
        # O(m*n) DP loop and again during backtracking).
        est_clean = [self._remove_punctuation(w).lower() for w in words_estimated]
        real_clean = [self._remove_punctuation(w).lower() for w in words_real]

        m, n = len(est_clean), len(real_clean)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(m):
            for j in range(n):
                if est_clean[i] == real_clean[j]:
                    dp[i + 1][j + 1] = dp[i][j] + 1
                else:
                    dp[i + 1][j + 1] = max(dp[i][j + 1], dp[i + 1][j])

        # Backtrack through the DP table to recover the matched pairs.
        mapped_words = [WORD_NOT_FOUND_TOKEN] * n
        mapped_words_indices = [-1] * n
        i, j = m, n
        while i > 0 and j > 0:
            if est_clean[i - 1] == real_clean[j - 1]:
                mapped_words[j - 1] = words_estimated[i - 1]
                mapped_words_indices[j - 1] = i - 1
                i -= 1
                j -= 1
            elif dp[i - 1][j] >= dp[i][j - 1]:
                i -= 1
            else:
                j -= 1
        return mapped_words, mapped_words_indices

    def _get_best_mapped_words(self, words_estimated: List[str], words_real: List[str], use_dtw: bool = True) -> Tuple[List[str], List[int]]:
        """Find, for every real word, the best matching estimated word.

        Returns (mapped_words, mapped_indices); real words with no match
        get WORD_NOT_FOUND_TOKEN / -1. Falls back to LCS alignment when
        DTW raises or returns a malformed path.
        """
        # Degenerate inputs: nothing to align.
        if not words_real:
            return [], []
        if not words_estimated:
            return [WORD_NOT_FOUND_TOKEN] * len(words_real), [-1] * len(words_real)

        word_distance_matrix = self._get_word_distance_matrix(words_estimated, words_real)
        # Sakoe-Chiba band width: 20% of the longer sequence, at least 1
        # (hoisted — the original computed this in both branches).
        calculated_window_size = max(1, int(0.2 * max(len(words_real), len(words_estimated))))

        if use_dtw:
            dtw_path = []
            if len(words_real) == 1:
                # Single real word: pick the cheapest row directly. Note this
                # may select the blank row, which marks the word as omitted.
                if word_distance_matrix.T.shape[1] > 0:
                    best_est_idx = np.argmin(word_distance_matrix.T[0])
                    dtw_path = [(0, int(best_est_idx))]
            elif word_distance_matrix.shape[1] == 0 or word_distance_matrix.shape[0] == 0:
                dtw_path = []
            else:
                try:
                    alignment = dtw_from_distance_matrix(word_distance_matrix.T, window_type="sakoechiba", window_size=calculated_window_size)
                    dtw_path = alignment.get_warping_path()
                    if not self._is_valid_dtw_path(dtw_path):
                        raise ValueError("DTW path format is abnormal")
                except Exception as e:
                    print(f"[ERROR] DTW alignment failed: {e}. Attempting LCS alignment.")
                    return self._lcs_align(words_estimated, words_real)
        else:
            # Non-DTW (OR-Tools) path is not implemented; fall back to a
            # plain DTW run with no error handling, as before.
            print("Warning: OR-Tools path for word mapping is not active. Falling back to DTW.")
            alignment = dtw_from_distance_matrix(word_distance_matrix.T, window_type="sakoechiba", window_size=calculated_window_size)
            dtw_path = alignment.get_warping_path()

        return self._get_resulting_string_from_dtw_path(dtw_path, words_estimated, words_real)

# These functions are not directly part of the matching logic but are used elsewhere.
# For now, they can remain at the module level to avoid breaking other parts of the app.
# Ideally, they should be moved to a more suitable utility module.

def getWhichLettersWereTranscribedCorrectly(real_word: str, transcribed_word: str) -> List[int]:
    """Score each letter of *real_word* against *transcribed_word* by position.

    Returns a list of 0/1 flags, one per character of *real_word*: 1 when the
    character at the same index matches case-insensitively (punctuation in the
    real word always counts as correct), 0 otherwise. A missing or
    WORD_NOT_FOUND_TOKEN transcription yields all zeros.
    """
    real_w = "" if real_word is None else real_word
    transcribed_w = "" if transcribed_word is None else transcribed_word
    correctness = [0] * len(real_w)

    if not transcribed_w or transcribed_w == WORD_NOT_FOUND_TOKEN:
        return correctness

    # Compare position-by-position up to the shorter of the two words.
    for idx in range(min(len(real_w), len(transcribed_w))):
        real_char = real_w[idx]
        if real_char.lower() == transcribed_w[idx].lower() or real_char in punctuation:
            correctness[idx] = 1

    return correctness

def parseLetterErrorsToHTML(word_real: str, is_leter_correct: List[int]) -> str:
    """Render *word_real* as HTML, coloring each letter by correctness.

    Correct letters (truthy flag) are wrapped in green <font> tags, incorrect
    ones in red. If the word is empty or the flag list length does not match,
    the word is returned unchanged.
    """
    if not word_real or len(word_real) != len(is_leter_correct):
        return word_real

    pieces = []
    for letter, was_correct in zip(word_real, is_leter_correct):
        color = "green" if was_correct else "red"
        pieces.append(f'<font color="{color}">{letter}</font>')
    return ''.join(pieces)