from curses import nl
from difflib import SequenceMatcher
import re
from typing import List, Dict
from datetime import timedelta
import spacy
from spacy.lang.vi import Vietnamese
from pythainlp import sent_tokenize as thai_sent_tokenize

class SegmentsAlignedProcessor:
    """Aligns a corrected transcript against per-word timing entries."""

    def __init__(self, correct_text: str, words: List[Dict]):
        # Timed word entries and the transcript text to align them with.
        self.words = words
        self.correct_text = correct_text

    def process(self) -> List[Dict]:
        """Run the alignment and return the aligned segment dicts."""
        return generate_results_from_json(self.correct_text, self.words)



def parse_time(time_obj):
    """Convert a {'seconds': ..., 'nanos': ...} mapping to seconds as a float.

    A falsy time_obj (None, empty dict) yields 0.0; missing keys default to 0.
    """
    if not time_obj:
        return 0.0

    total = float(time_obj.get('seconds', 0))
    total += float(time_obj.get('nanos', 0)) / 1_000_000_000
    return total

def format_srt_time(seconds):
    """Format a duration in seconds as an SRT timestamp ``HH:MM:SS,mmm``.

    The original routed the value through a ``timedelta`` only to call
    ``total_seconds()`` on it — a no-op round-trip — so the float is used
    directly here. Milliseconds are truncated (not rounded), matching the
    original behavior.

    Args:
        seconds: non-negative duration in seconds (float or int).

    Returns:
        Timestamp string like ``"01:01:01,500"``.
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    # Truncate the fractional part to whole milliseconds.
    milliseconds = int((secs % 1) * 1000)
    return f"{int(hours):02d}:{int(minutes):02d}:{int(secs):02d},{milliseconds:03d}"

def similarity(a, b):
    """Return a similarity ratio in [0, 1] for two strings (difflib metric)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()

def normalize_text_for_matching(text, language):
    """Normalize text for fuzzy matching.

    Lowercases Latin-script languages (Vietnamese/English), strips Thai tone
    marks, leaves other languages as-is, then removes all spaces so window
    text and sentence text compare character-to-character.
    """
    if language in ('vietnamese', 'english'):
        # Case-insensitive comparison for Latin scripts.
        text = text.lower()
    elif language == 'thai':
        # Drop Thai tone marks (U+0E48–U+0E4B) which are optional in practice.
        text = re.sub(r'[\u0e48-\u0e4b]', '', text)

    # Spaces never participate in matching.
    return text.replace(' ', '')

def find_word_sequence_in_words_list(sentence, words_list, start_index=0, max_search_range=None):
    """
    Find the contiguous run of entries in words_list that best matches sentence.

    Returns (start index, end index, similarity) of the best-scoring window,
    or None when no window clears the language-specific similarity threshold.

    Args:
        sentence: the sentence to match
        words_list: list of word dicts; each entry's 'word' value is concatenated
        start_index: index to start the search from
        max_search_range: cap on how many start positions to try; when None it is
            derived from the normalized sentence length and detected language
    """
    # Detect the sentence's language to pick normalization and thresholds.
    language = detect_language(sentence)
    
    # Normalize the sentence once; each candidate window is normalized the same way.
    normalized_sentence = normalize_text_for_matching(sentence, language)
    
    # Derive the search range when the caller did not supply one.
    if max_search_range is None:
        sentence_length = len(normalized_sentence)
        # Scale the range with sentence length, per language.
        if language in ['vietnamese', 'english']:
            # Alphabetic scripts: one word spans several characters, search wider.
            max_search_range = min(sentence_length * 3, 100)
        elif language == 'chinese':
            # Chinese: moderate range.
            max_search_range = min(sentence_length * 2, 80)
        elif language == 'thai':
            # Thai: moderate range.
            max_search_range = min(sentence_length * 2, 80)
        else:
            # Unknown language: conservative range.
            max_search_range = min(sentence_length * 2, 60)
    
    # Clamp the last start position to the end of the list.
    search_end = min(start_index + max_search_range, len(words_list))
    
    print(f"搜索范围: {start_index} 到 {search_end} (共 {search_end - start_index} 个单词)")
    
    best_match = None
    best_similarity = 0
    
    # Sliding-window search over start positions in [start_index, search_end).
    for start in range(start_index, search_end):
        # Window size scales with the normalized sentence length, per language.
        max_window_size = len(normalized_sentence) + 10
        if language in ['vietnamese', 'english']:
            # Alphabetic scripts may need a larger window.
            max_window_size = len(normalized_sentence) + 20
        
        # Try every window length up to the cap (windows may extend past search_end).
        for end in range(start + 1, min(start + max_window_size, len(words_list) + 1)):
            window_words = words_list[start:end]
            window_text = ''.join([w.get('word', '') for w in window_words])
            
            # Normalize the window text the same way as the sentence.
            normalized_window = normalize_text_for_matching(window_text, language)
            
            # Score the candidate window against the sentence.
            sim = similarity(normalized_sentence, normalized_window)
            
            # Language-specific acceptance threshold.
            threshold = 0.6
            if language in ['thai', 'vietnamese']:
                threshold = 0.5  # lower the bar for these languages
            
            # Keep the best-scoring window that clears the threshold.
            if sim > best_similarity and sim > threshold:
                best_similarity = sim
                best_match = (start, end - 1, sim)
    
    return best_match

def detect_language(text):
    """Heuristically classify text by script-character ratios.

    Counts characters per script with regexes and returns the first of
    'chinese', 'thai', 'vietnamese', 'english' whose share of non-space
    characters exceeds its threshold, else 'unknown'.
    """
    total_chars = len(text.replace(' ', ''))
    if total_chars == 0:
        return 'unknown'

    # (language, script pattern, ratio threshold) — checked in this order.
    script_checks = (
        ('chinese', r'[\u4e00-\u9fff]', 0.3),                  # CJK ideographs
        ('thai', r'[\u0e00-\u0e7f]', 0.3),                     # Thai block
        ('vietnamese', r'[\u00c0-\u024f\u1e00-\u1eff]', 0.1),  # accented Latin
        ('english', r'[a-zA-Z]', 0.5),                         # ASCII letters
    )
    for lang, pattern, threshold in script_checks:
        if len(re.findall(pattern, text)) / total_chars > threshold:
            return lang
    return 'unknown'

def get_duration_multiplier(language):
    """Return the per-character duration estimate (seconds) for a language.

    Used to synthesize timings for sentences that found no word match.
    """
    if language == 'chinese':
        return 0.15      # roughly 0.15s per Chinese character
    if language == 'thai':
        return 0.12      # Thai is spoken relatively quickly
    if language == 'vietnamese':
        return 0.08      # alphabetic script: many characters per syllable
    if language == 'english':
        return 0.05      # shortest per-character duration
    return 0.12          # default for unknown languages

def segment_text_by_punctuation(text):
    """Split text into sentences with a language-appropriate tokenizer.

    Thai uses pythainlp; Vietnamese uses spaCy's blank Vietnamese pipeline;
    Chinese/English use the corresponding pretrained spaCy models; any other
    language falls back to a simple punctuation split. The three spaCy
    branches of the original were duplicated line-for-line and are now
    factored into ``_spacy_sentences``.

    Returns:
        (sentences, language): the sentence list and the detected language.
    """
    language = detect_language(text)
    print(f"检测到语言: {language}")
    if language == 'thai':
        sentences = thai_sent_tokenize(text)
    elif language in ('vietnamese', 'chinese', 'english'):
        sentences = _spacy_sentences(text, language)
    else:
        # Unknown language: naive split on sentence-ending punctuation.
        sentences = [s.strip() for s in re.split(r'[。.!?]', text) if s.strip()]
    return sentences, language


def _spacy_sentences(text, language):
    """Sentence-split via spaCy with a rule-based sentencizer appended."""
    if language == 'vietnamese':
        # Blank Vietnamese pipeline — no pretrained model required.
        nlp = Vietnamese()
    elif language == 'chinese':
        nlp = spacy.load("zh_core_web_sm")
    else:  # english
        nlp = spacy.load("en_core_web_sm")
    nlp.add_pipe('sentencizer')  # rule-based sentence boundary detection
    doc = nlp(text)
    return [sent.text for sent in doc.sents]

def fill_missing_times(sentences, sentence_matches, language='chinese'):
    """Fill in timing for sentences that found no word match.

    Mutates ``sentence_matches`` in place: every ``None`` slot is replaced by
    a dict with estimated ``start_time``/``end_time`` and ``matched: False``.
    Runs of consecutive unmatched sentences are handled as one unit:

    - anchors on both sides: the gap is split evenly between them;
    - only a leading anchor: estimated durations extend forward from it;
    - only a trailing anchor: a start is back-computed so the run ends at it;
    - no anchors: the run starts at time 0.

    The original repeated the forward-fill loop three times verbatim; it is
    factored into ``_fill_forward`` below (output is byte-identical).

    Args:
        sentences: all sentences, in order.
        sentence_matches: parallel list of match dicts or None.
        language: drives the per-character duration estimate.
    """
    duration_multiplier = get_duration_multiplier(language)

    def _estimate_duration(sentence):
        # Length-based estimate (spaces ignored), floored at 2 seconds.
        return max(2.0, len(sentence.replace(' ', '')) * duration_multiplier)

    def _fill_forward(start_idx, end_idx, start_at, label):
        # Assign consecutive estimated spans beginning at start_at.
        current_time = start_at
        for j in range(start_idx, end_idx + 1):
            duration = _estimate_duration(sentences[j])
            start_time = current_time
            end_time = current_time + duration
            current_time = end_time

            sentence_matches[j] = {
                'sentence': sentences[j],
                'start_time': start_time,
                'end_time': end_time,
                'matched': False
            }
            print(f"估算时间 ({label}): {sentences[j]} -> {format_srt_time(start_time)} --> {format_srt_time(end_time)}")

    i = 0
    while i < len(sentences):
        if sentence_matches[i] is not None:
            i += 1
            continue

        # Locate the run [start_idx, end_idx] of consecutive unmatched sentences.
        start_idx = i
        while i < len(sentences) and sentence_matches[i] is None:
            i += 1
        end_idx = i - 1

        # Time anchors immediately before and after the run (if any).
        prev_end_time = sentence_matches[start_idx - 1]['end_time'] if start_idx > 0 else None
        next_start_time = sentence_matches[end_idx + 1]['start_time'] if end_idx + 1 < len(sentences) else None
        unmatched_count = end_idx - start_idx + 1

        if prev_end_time is not None and next_start_time is not None:
            # Anchors on both sides: split the available gap evenly.
            time_per_sentence = (next_start_time - prev_end_time) / unmatched_count
            for j in range(start_idx, end_idx + 1):
                start_time = prev_end_time + (j - start_idx) * time_per_sentence
                end_time = start_time + time_per_sentence

                sentence_matches[j] = {
                    'sentence': sentences[j],
                    'start_time': start_time,
                    'end_time': end_time,
                    'matched': False
                }
                print(f"估算时间 (区间插值): {sentences[j]} -> {format_srt_time(start_time)} --> {format_srt_time(end_time)}")
        elif prev_end_time is not None:
            # Only a leading anchor: extend forward from it.
            _fill_forward(start_idx, end_idx, prev_end_time, '向后延伸')
        elif next_start_time is not None:
            # Only a trailing anchor: back-compute a start so the run ends at it
            # (clamped at 0 so times never go negative).
            total_duration = sum(
                _estimate_duration(sentences[j]) for j in range(start_idx, end_idx + 1)
            )
            _fill_forward(start_idx, end_idx, max(0, next_start_time - total_duration), '向前回推')
        else:
            # No anchors anywhere: start the run at time 0.
            _fill_forward(start_idx, end_idx, 0, '从头开始')

def generate_results_from_json(correct_text, words_list):
    """
    Align correct_text against timed word entries and build subtitle segments.

    Two passes: first each sentence is matched to a contiguous run of words
    (taking its start/end times from the first/last matched word); then the
    remaining sentences get estimated times from fill_missing_times.

    Args:
        correct_text: the full corrected transcript.
        words_list: word dicts with 'word', 'startTime' and 'endTime' fields.

    Returns:
        List of {'start', 'end', 'text'} dicts with SRT-formatted timestamps.
    """
    print(f"原文本: {correct_text}")
    print(f"总词数: {len(words_list)}")
    
    # Split the text into sentences, multi-language aware.
    sentences, language = segment_text_by_punctuation(correct_text)
    print(f"分割后的句子数: {len(sentences)}")
    
    # Pass 1: try to match every sentence to a run of timed words.
    sentence_matches = [None] * len(sentences)
    current_word_index = 0
    
    for i, sentence in enumerate(sentences):
        print(f"\n第一轮处理句子 {i+1}: {sentence}")
        
        # Find the word sequence corresponding to this sentence,
        # searching forward from the last consumed word.
        match_result = find_word_sequence_in_words_list(
            sentence, words_list, current_word_index
        )
        
        if match_result:
            start_idx, end_idx, similarity_score = match_result
            print(f"找到匹配: 索引 {start_idx}-{end_idx}, 相似度: {similarity_score:.2f}")
            
            # Take timing from the first and last matched word.
            start_time = parse_time(words_list[start_idx]['startTime'])
            end_time = parse_time(words_list[end_idx]['endTime'])
            
            sentence_matches[i] = {
                'sentence': sentence,
                'start_time': start_time,
                'end_time': end_time,
                'matched': True
            }
            
            # Advance the search start so matches never overlap.
            current_word_index = end_idx + 1
            
            print(f"时间: {format_srt_time(start_time)} --> {format_srt_time(end_time)}")
        else:
            print(f"警告: 无法找到句子的匹配: {sentence}")
    
    # Pass 2: estimate times for the sentences that did not match.
    print(f"\n第二轮：填充缺失的时间信息")
    fill_missing_times(sentences, sentence_matches, language)
    
    # Build the SRT-style result entries.
    results = []
    
    for i, match_info in enumerate(sentence_matches):
        if match_info is not None:
            # Language-aware text cleanup before emitting.
            cleaned_text = clean_text_by_language(match_info['sentence'], language)
            
            # One subtitle entry per sentence.
            results.append({
                'start': format_srt_time(match_info['start_time']),
                'end': format_srt_time(match_info['end_time']),
                'text': cleaned_text
            })
    return results

def clean_text_by_language(text, language):
    """Flatten newlines to spaces and trim surrounding whitespace.

    The original docstring claimed the text was returned strictly unchanged,
    which contradicted the implementation; the actual behavior (newline
    replacement + strip) is kept and now documented accurately.

    Args:
        text: sentence text to clean.
        language: currently unused; kept for interface compatibility.

    Returns:
        The cleaned single-line string.
    """
    return text.replace('\n', ' ').strip()
