import WordMetrics # Direct import
import numpy as np
from string import punctuation
from dtwalign import dtw_from_distance_matrix # External dependency
import time
from typing import List, Tuple
# from ortools.sat.python import cp_model # External dependency, currently commented out

offset_blank = 1 # When 1, an extra "blank" row is appended to the distance matrix so DTW can skip (omit) a real word.
TIME_THRESHOLD_MAPPING = 5.0 # Time budget (seconds) for the OR-Tools mapping path; unused while that path is commented out.

WORD_NOT_FOUND_TOKEN = '-' # Placeholder for a real word that has no matching estimated word.

def get_word_distance_matrix(words_estimated: List[str], words_real: List[str]) -> np.ndarray:
    """Build the pairwise edit-distance matrix between estimated and real words.

    Rows index estimated words (plus one trailing "blank" row when
    ``offset_blank`` is 1); columns index real words.  Entry ``[i, j]`` is
    the edit distance between ``words_estimated[i]`` and ``words_real[j]``.
    The blank row holds the cost of a real word being omitted entirely,
    taken to be that word's length (pure insertion cost).
    """
    # Tolerate None inputs by treating them as empty lists.
    estimated = [] if words_estimated is None else words_estimated
    real = [] if words_real is None else words_real

    n_est = len(estimated)
    n_real = len(real)

    # Start from infinity so any cell never filled below stays "impossible".
    # Note: dtw_from_distance_matrix downstream receives this matrix
    # transposed, so callers pass .T when real words are the query axis.
    distance = np.full((n_est + offset_blank, n_real), np.inf)

    for i, est_word in enumerate(estimated):
        for j, real_word in enumerate(real):
            distance[i, j] = WordMetrics.edit_distance_python(est_word, real_word)

    if offset_blank == 1:
        # Last row: cost of matching a real word against a "skip" in the
        # estimated sequence, i.e. the real word was omitted by the ASR.
        for j, real_word in enumerate(real):
            distance[n_est, j] = len(real_word)

    return distance


# def get_best_path_from_distance_matrix(word_distance_matrix): # OR-Tools part, commented out
#     # ... (original OR-Tools code) ...
#     pass


def _is_valid_dtw_path(path) -> bool:
    """True if `path` looks like a sequence of 2-element (real_idx, est_idx) pairs."""
    if not isinstance(path, (list, tuple, np.ndarray)):
        return False
    if len(path) == 0:
        return True
    head = path[0]
    return isinstance(head, (list, tuple, np.ndarray)) and len(head) == 2


def get_resulting_string_from_dtw_path(dtw_alignment_path: List[Tuple[int, int]],
                                     words_estimated: List[str],
                                     words_real: List[str]) -> Tuple[List[str], List[int]]:
    """
    Build the per-real-word mapping from a DTW warping path.

    dtw_alignment_path: sequence of (real_idx, estimated_idx) pairs, as
        produced by dtw_from_distance_matrix(word_distance_matrix.T).
        An estimated_idx >= len(words_estimated) refers to the "blank"
        row, meaning the real word was omitted by the ASR.
    words_estimated: ASR-transcribed words.
    words_real: reference (target) words.

    Returns (mapped_words, mapped_words_indices), both of length
    len(words_real).  Real words with no match keep WORD_NOT_FOUND_TOKEN
    and index -1.
    """
    mapped_words = [WORD_NOT_FOUND_TOKEN] * len(words_real)
    mapped_words_indices = [-1] * len(words_real)  # indices into words_estimated

    # Malformed path (e.g. a flat array instead of index pairs): mark all
    # real words as omitted instead of crashing during unpacking below.
    if not _is_valid_dtw_path(dtw_alignment_path):
        print("[ERROR get_resulting_string_from_dtw_path] 路径格式异常，全部标记为遗漏")
        return mapped_words, mapped_words_indices

    for real_idx, est_idx in dtw_alignment_path:
        if real_idx >= len(words_real):
            continue  # out-of-range safety guard
        if est_idx >= len(words_estimated):
            continue  # blank/skip row: words_real[real_idx] treated as omitted
        # DTW may stretch several estimated words onto one real word;
        # keep the first mapping the path produces for each real word.
        if mapped_words[real_idx] == WORD_NOT_FOUND_TOKEN:
            mapped_words[real_idx] = words_estimated[est_idx]
            mapped_words_indices[real_idx] = est_idx

    return mapped_words, mapped_words_indices


def get_best_mapped_words(words_estimated: List[str], words_real: List[str], use_dtw: bool = True) -> Tuple[List[str], List[int]]:
    """
    Map each real (reference) word to its best-matching estimated (ASR) word.

    words_estimated: words produced by the ASR.
    words_real: reference words to align against.
    use_dtw: kept for backward compatibility; the OR-Tools alternative is
        not maintained, so both values end up using DTW.

    Returns (mapped_words, mapped_words_indices): for each real word, the
    matched estimated word (WORD_NOT_FOUND_TOKEN if omitted) and its index
    in words_estimated (-1 if omitted).
    """
    if not words_real:
        # Nothing to align against.
        return [], []
    if not words_estimated:
        # ASR produced nothing: every real word counts as omitted.
        return [WORD_NOT_FOUND_TOKEN] * len(words_real), [-1] * len(words_real)

    # distance[i, j] = edit distance between estimated[i] and real[j],
    # plus one trailing "blank" row for skips (see get_word_distance_matrix).
    word_distance_matrix = get_word_distance_matrix(words_estimated, words_real)

    # Sakoe-Chiba band width for DTW.  BUG FIX: this was previously only
    # assigned inside the `if use_dtw:` branch, so calling with
    # use_dtw=False raised NameError; compute it before branching.
    window_size = max(1, int(0.2 * max(len(words_real), len(words_estimated))))

    if not use_dtw:
        print("Warning: OR-Tools path for word mapping is not active. Falling back to DTW.")

    # Note: the early returns above guarantee both matrix dimensions are
    # non-zero, so the old shape-0 branches were unreachable and are gone.
    if len(words_real) == 1:
        # The banded DTW degenerates on a single-column matrix; pick the
        # cheapest estimated word (or the blank row) for the lone real word.
        best_est_idx = int(np.argmin(word_distance_matrix[:, 0]))
        dtw_path = [(0, best_est_idx)]
    else:
        try:
            # dtwalign expects dist[query, reference]; transposing gives a
            # warping path of (real_idx, estimated_idx) pairs.
            alignment = dtw_from_distance_matrix(
                word_distance_matrix.T,
                window_type="sakoechiba",
                window_size=window_size,
            )
            dtw_path = alignment.get_warping_path()
            # Reject anything that is not a sequence of index pairs.
            if not isinstance(dtw_path, (list, tuple, np.ndarray)) or \
               (len(dtw_path) > 0 and not (isinstance(dtw_path[0], (list, tuple, np.ndarray)) and len(dtw_path[0]) == 2)):
                raise ValueError("DTW路径格式异常")
        except Exception as e:
            # DTW can fail (e.g. band too narrow for very different
            # lengths); fall back to LCS-based alignment.
            print(f"[ERROR WordMatching] DTW对齐失败: {e}，尝试LCS对齐")
            mapped_words, mapped_words_indices = lcs_align(words_estimated, words_real)
            if all(w == WORD_NOT_FOUND_TOKEN for w in mapped_words):
                print("[ERROR WordMatching] LCS也无重叠，全部标记为遗漏")
            return mapped_words, mapped_words_indices

    return get_resulting_string_from_dtw_path(dtw_path, words_estimated, words_real)


# get_best_mapped_words_dtw seems redundant if get_best_mapped_words defaults to DTW.
# Keeping it commented for now.
# def get_best_mapped_words_dtw(words_estimated: list, words_real: list) -> list:
#     from dtwalign import dtw_from_distance_matrix
#     word_distance_matrix = get_word_distance_matrix(words_estimated, words_real)
#     # The original .path[:-1,0] indexing was likely specific to a certain dtw object structure or goal.
#     # Using get_warping_path() is generally more robust.
#     alignment = dtw_from_distance_matrix(word_distance_matrix.T)
#     dtw_path = alignment.get_warping_path()
#     mapped_words, mapped_words_indices = get_resulting_string_from_dtw_path(dtw_path, words_estimated, words_real)
#     return mapped_words, mapped_words_indices


def getWhichLettersWereTranscribedCorrectly(real_word: str, transcribed_word: str) -> List[int]:
    """Per-letter correctness flags for real_word against transcribed_word.

    Returns a list of len(real_word) ints: 1 where the letter at the same
    position matches case-insensitively (punctuation always counts as
    correct), 0 otherwise.  A placeholder or empty transcription yields
    all zeros.  Note: this is a positional overlay, not a character-level
    alignment, so an insertion/deletion inside the word shifts everything
    after it.
    """
    # Tolerate None inputs by treating them as empty strings.
    real_w = "" if real_word is None else real_word
    transcribed_w = "" if transcribed_word is None else transcribed_word

    flags = [0 for _ in real_w]  # default: every letter incorrect

    # Placeholder ('-') or empty transcription: whole word is wrong.
    if transcribed_w == WORD_NOT_FOUND_TOKEN or not transcribed_w:
        return flags

    # Positional compare up to the shorter word; zip stops there, so any
    # trailing letters of real_w stay marked incorrect.
    for idx, (real_char, trans_char) in enumerate(zip(real_w, transcribed_w)):
        if real_char.lower() == trans_char.lower() or real_char in punctuation:
            flags[idx] = 1

    return flags


def parseLetterErrorsToHTML(word_real: str, is_leter_correct: List[int]) -> str:
    """Wrap each letter of word_real in a span marking it correct or wrong.

    is_leter_correct: per-letter flags (1 = correct, anything else =
    wrong), same length as word_real.  On empty input or a length
    mismatch the word is returned unchanged.
    """
    if not word_real or len(is_leter_correct) != len(word_real):
        return word_real

    # CSS classes carry the styling; keep markup generation data-driven.
    span_for = {
        True: '<span class="correct-letter">{}</span>',
        False: '<span class="wrong-letter">{}</span>',
    }
    pieces = []
    for flag, letter in zip(is_leter_correct, word_real):
        pieces.append(span_for[flag == 1].format(letter))
    return ''.join(pieces)

# LCS-based word alignment, used as a fallback when DTW alignment fails

def lcs_align(words_estimated, words_real):
    """Align real words to estimated words via longest common subsequence.

    Fallback used when DTW alignment fails.  Returns (mapped_words,
    mapped_words_indices), both of length len(words_real); real words not
    in the LCS get WORD_NOT_FOUND_TOKEN and index -1.
    """
    n_est, n_real = len(words_estimated), len(words_real)

    # Standard LCS length table: table[i][j] = LCS length of the first i
    # estimated words and the first j real words.
    table = [[0] * (n_real + 1) for _ in range(n_est + 1)]
    for i in range(1, n_est + 1):
        for j in range(1, n_real + 1):
            if words_estimated[i - 1] == words_real[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])

    mapped_words = [WORD_NOT_FOUND_TOKEN] * n_real
    mapped_words_indices = [-1] * n_real

    # Backtrack from the bottom-right corner, recording matched pairs.
    i, j = n_est, n_real
    while i and j:
        if words_estimated[i - 1] == words_real[j - 1]:
            mapped_words[j - 1] = words_estimated[i - 1]
            mapped_words_indices[j - 1] = i - 1
            i, j = i - 1, j - 1
        elif table[i - 1][j] >= table[i][j - 1]:
            i -= 1
        else:
            j -= 1

    return mapped_words, mapped_words_indices