import os
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from src.ui.i18n import _
from PySide6.QtCore import Signal
from src.ui.base_worker import BaseWorker
from src.utils.srt_parser import SRTParser, Subtitle
from src.translation.openai_translator import OpenAITranslator, TranslationError
from src.translation.microsoft_translator import MicrosoftTranslator
from src.utils.logger import get_logger
from src.config import app_config
from datetime import timedelta

def _create_dynamic_batches(subtitles, logger):
    """Group subtitles into batches that form meaningful semantic chunks.

    Greedily grows each batch up to the configured line/character limits,
    prefers to end a batch at a long pause between subtitles, then trims
    the batch back to the last sentence-ending punctuation mark found.

    Args:
        subtitles: Sequence of subtitle objects exposing ``text``,
            ``start_ms`` and ``duration`` (duration presumably in seconds,
            given the ``* 1000`` conversion below).
        logger: Logger used for info/debug output.

    Returns:
        A list of batches, each a list of consecutive subtitle objects.
    """
    min_lines = app_config.get('translation.min_lines_per_batch', 3)
    max_lines = app_config.get('translation.max_lines_per_batch', 8)
    max_chars = app_config.get('translation.max_chars_per_batch', 4000)
    max_pause_sec = app_config.get('translation.max_pause_seconds', 1.5)

    logger.info(f"Creating dynamic batches with min_lines={min_lines}, max_lines={max_lines}, max_chars={max_chars}, max_pause_sec={max_pause_sec}")

    batches = []
    cursor = 0
    total = len(subtitles)
    while cursor < total:
        # Phase 1: greedily collect candidates up to the hard limits.
        candidates = []
        char_count = 0
        for idx in range(cursor, total):
            sub = subtitles[idx]

            hit_line_cap = len(candidates) >= max_lines
            hit_char_cap = bool(candidates) and (char_count + len(sub.text) > max_chars)
            if hit_line_cap or hit_char_cap:
                break

            candidates.append(sub)
            char_count += len(sub.text)

            # A long silence after this subtitle is a natural boundary.
            if idx < total - 1:
                end_ms = sub.start_ms + (sub.duration * 1000)
                gap_sec = (subtitles[idx + 1].start_ms - end_ms) / 1000.0
                if gap_sec > max_pause_sec and len(candidates) >= min_lines:
                    break

        # Phase 2: walk backwards to find a sentence-ending split point.
        split_at = len(candidates)  # default: keep the whole chunk
        for back in range(len(candidates) - 1, 0, -1):
            # Don't shrink below min_lines unless the chunk is already tiny.
            if back + 1 < min_lines and len(candidates) >= min_lines:
                continue
            if candidates[back].text.strip().endswith(('.', '?', '!', '。', '？', '！')):
                split_at = back + 1
                break

        batch = candidates[:split_at]
        batches.append(batch)

        logger.debug(f"Finalized batch #{len(batches)} with {len(batch)} subtitles.")

        # Phase 3: advance past the consumed subtitles.
        cursor += len(batch)

    return batches

def _write_segmented_srt(output_path, original_batches, translated_segments_batches, logger):
    """Write a new SRT file with re-segmented subtitles and approximated timings.

    Each original batch's time span is distributed across its translated
    segments proportionally to segment character length. Segments that would
    exceed MAX_DURATION_S get an emergency character-based split so no single
    subtitle stays on screen too long.

    Args:
        output_path: Destination .srt file path (overwritten).
        original_batches: List of batches of original subtitle objects
            (exposing ``start_ms`` and ``duration``).
        translated_segments_batches: Parallel list; each entry is the list of
            translated text segments for the corresponding batch.
        logger: Logger for progress/warning output.
    """

    def _format_timestamp(ms):
        # hh:mm:ss,mmm from absolute milliseconds. Computed directly from
        # total ms rather than timedelta.seconds, which silently drops whole
        # days and would wrap timestamps for content running past 24 hours.
        total_seconds, millis = divmod(int(ms), 1000)
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{hours:02}:{minutes:02}:{seconds:02},{millis:03}"

    def _write_cue(f, index, start_ms, end_ms, text):
        # One numbered SRT cue: index, time range, text, blank separator.
        f.write(f"{index}\n")
        f.write(f"{_format_timestamp(start_ms)} --> {_format_timestamp(end_ms)}\n")
        f.write(f"{text}\n\n")

    logger.info(_("Writing segmented SRT file to {}").format(output_path))

    MAX_DURATION_S = 12  # No single subtitle should be longer than 12 seconds
    srt_index = 1
    with open(output_path, 'w', encoding='utf-8') as f:
        for i, original_batch in enumerate(original_batches):
            translated_segments = translated_segments_batches[i]
            if not translated_segments:
                continue

            batch_start_time_ms = original_batch[0].start_ms
            last_sub = original_batch[-1]
            batch_end_time_ms = last_sub.start_ms + (last_sub.duration * 1000)
            total_duration_ms = batch_end_time_ms - batch_start_time_ms

            total_chars = sum(len(seg) for seg in translated_segments)
            if total_duration_ms <= 0:
                # Degenerate timing data: give each segment a flat 500 ms.
                total_duration_ms = 500 * len(translated_segments)
                logger.warning(_("Batch {} has zero or negative duration. Applying fallback timing.").format(i+1))

            if total_chars == 0:
                logger.warning(_("Batch {} has no translated characters. Skipping.").format(i+1))
                continue

            current_start_time_ms = batch_start_time_ms
            for segment_text in translated_segments:
                char_len = len(segment_text)
                proportional_duration_ms = int(total_duration_ms * (char_len / total_chars)) if total_chars > 0 else 0
                segment_duration_ms = max(proportional_duration_ms, 500)  # Ensure minimum duration

                # Safety net: if a segment would stay on screen too long,
                # split it further and share its duration evenly.
                if segment_duration_ms > (MAX_DURATION_S * 1000):
                    logger.warning(_("Segment is too long ({}s), performing emergency split: {}").format(segment_duration_ms / 1000, segment_text))
                    sub_segments = _split_long_segments(segments=[segment_text], max_len=50, logger=logger)

                    if len(sub_segments) > 1:
                        sub_duration_ms = segment_duration_ms // len(sub_segments)
                        for sub_segment in sub_segments:
                            segment_end_time_ms = current_start_time_ms + sub_duration_ms
                            _write_cue(f, srt_index, current_start_time_ms, segment_end_time_ms, sub_segment)
                            srt_index += 1
                            current_start_time_ms = segment_end_time_ms
                        continue  # Skip the original segment writing

                # Write segment as usual if not too long (or if the emergency
                # split could not produce more than one piece).
                segment_end_time_ms = current_start_time_ms + segment_duration_ms
                _write_cue(f, srt_index, current_start_time_ms, segment_end_time_ms, segment_text)
                srt_index += 1
                current_start_time_ms = segment_end_time_ms

def _split_long_segments(segments, max_len=80, logger=None):
    """Splits segments that are too long into smaller pieces."""
    if not segments:
        return []

    new_segments = []
    split_chars = ['。', '！', '？', '，', ',', ' ']  # Split characters in order of preference

    for segment in segments:
        if len(segment) <= max_len:
            if segment:
                new_segments.append(segment)
            continue

        if logger:
            logger.debug(f"Splitting long segment ({len(segment)} chars): {segment}")

        remaining_text = segment
        while len(remaining_text) > max_len:
            chunk_to_search = remaining_text[:max_len]
            best_split_pos = -1

            # Find the last occurrence of a preferred split character in the chunk
            for char in split_chars:
                pos = chunk_to_search.rfind(char)
                if pos > 0:
                    best_split_pos = pos + 1  # Split after the character
                    break
            
            if best_split_pos == -1:
                # If no preferred character, find the last space in a wider search
                pos = chunk_to_search.rfind(' ')
                if pos > max_len / 2: # Only split at space if it's reasonably far in
                    best_split_pos = pos + 1

            if best_split_pos == -1:
                # If no good split point found, hard-split at max_len
                best_split_pos = max_len

            new_part = remaining_text[:best_split_pos].strip()
            if new_part:
                new_segments.append(new_part)
            remaining_text = remaining_text[best_split_pos:].strip()

        if remaining_text:
            new_segments.append(remaining_text)

    return new_segments

def _balance_translated_segments(texts, lang, logger):
    """Rebalance translated segments so each ends at natural punctuation.

    When a segment ends mid-sentence, the text of the following segment up
    to (and including) its first punctuation mark is pulled back onto it,
    which avoids unnatural breaks for TTS.

    Args:
        texts: List of translated segment strings.
        lang: Target language code (e.g. "zh", "en-US").
        logger: Logger for info/debug output.

    Returns:
        A new list of rebalanced segment strings (input is not mutated).
    """
    if not texts:
        return []

    logger.info(_("Performing smart balancing on translated segments for language: {}").format(lang))

    # Sentence-ending punctuation per language family.
    punctuation_map = {
        'zh': ['。', '！', '？', '；', '……', '，'],
        'ja': ['。', '！', '？', '；', '…', '、'],
        'en': ['.', '!', '?', ';', ','],
    }

    lang_prefix = lang.split('-')[0]
    end_punctuation = punctuation_map.get(lang_prefix, punctuation_map.get('en'))
    joiner = " " if lang_prefix == 'en' else ""

    balanced = list(texts)

    idx = 0
    while idx < len(balanced) - 1:
        current = balanced[idx].strip()

        # Nothing to do if empty or already terminated by punctuation.
        if not current or any(current.endswith(p) for p in end_punctuation):
            idx += 1
            continue

        # This segment needs fixing; look at the one that follows.
        following = balanced[idx + 1].strip()
        if not following:
            idx += 1
            continue

        # The earliest punctuation mark in the next segment marks how much
        # text to pull back onto the current one.
        earliest = -1
        for p in end_punctuation:
            pos = following.find(p)
            if pos != -1 and (earliest == -1 or pos < earliest):
                earliest = pos

        if earliest != -1:
            moved = following[:earliest + 1]
            remainder = following[earliest + 1:].lstrip()

            merged = current + joiner + moved
            balanced[idx] = merged
            balanced[idx + 1] = remainder

            logger.debug(f"Balancing segments {idx} and {idx+1}:")
            logger.debug(f"  - Original: ['{current}'] ['{following}']")
            logger.debug(f"  - New:      ['{merged}'] ['{remainder}']")

        idx += 1

    return balanced

def translate_srt_core(worker, srt_path, translator_name, source_lang, target_lang, model_config=None, video_type="常规", translation_style="常规", no_think=False, progress_callback=None, stream_callback=None, batch_start_callback=None, batch_done_callback=None, all_done_callback=None, timing_callback=None, is_stopped_func=None, time_tick_callback=None, logger=None):
    """Core logic for subtitle translation.

    Parses the SRT at ``srt_path``, translates it with the named service and
    writes a new SRT next to the input named ``<base>.<target_locale><ext>``.
    Two workflows exist: an intelligent-segmentation path (concurrent,
    OpenAI-only, re-times the output) and the standard line-by-line path.

    Args:
        worker: Owning worker object (not used directly in this function).
        srt_path: Path of the source .srt file.
        translator_name: Display name matching a translator's get_name().
        source_lang / target_lang: Language codes (e.g. "en", "zh").
        model_config: Optional dict of model settings (may contain "base_url").
        video_type / translation_style: Prompt hints for the translator.
        no_think: Disable model "thinking" mode (OpenAI path only).
        *_callback: Optional UI hooks for progress, streaming, batch and
            timing updates; all may be None.
        is_stopped_func: Callable returning True once the user cancelled.
        time_tick_callback: Receives (elapsed_seconds, 0), throttled.
        logger: Optional logger; a module logger is created if omitted.

    Returns:
        The output SRT path on success, or None if cancelled early.

    Raises:
        InterruptedError: the module-local class, if cancelled mid-batch.
        NotImplementedError: for unsupported translator names.
    """
    total_start_time = time.time()
    logger = logger or get_logger(__name__)
    is_stopped = is_stopped_func or (lambda: False)

    last_tick_time = 0
    def tick_time():
        # Throttle elapsed-time UI updates to at most one per 200 ms.
        nonlocal last_tick_time
        now = time.time()
        if time_tick_callback and (now - last_tick_time > 0.2):
            elapsed_time = now - total_start_time
            time_tick_callback(elapsed_time, 0)
            last_tick_time = now

    def progress(msg):
        # Forward a human-readable status line to the UI and the log.
        if progress_callback: progress_callback(msg)
        logger.info(msg)

    def record_time(stage, elapsed_time):
        # Per-stage timing for debugging and the optional timing UI.
        logger.debug(_("⏱️  [Timing] {stage} took: {elapsed_time:.2f} s").format(stage=stage, elapsed_time=elapsed_time))
        if timing_callback:
            timing_callback(stage, elapsed_time)

    if is_stopped(): return None
    progress(_("Starting to parse SRT file for translation..."))
    stage_start_time = time.time()
    parser = SRTParser(srt_path)
    parser.parse()
    record_time(_("Parse SRT file"), time.time() - stage_start_time)
    progress(_("File parsed successfully, found {} subtitles.").format(len(parser.subtitles)))

    if is_stopped(): return None
    progress(_("Initializing translation service..."))
    stage_start_time = time.time()
    if translator_name == OpenAITranslator.get_name():
        translator = OpenAITranslator()
    elif translator_name == MicrosoftTranslator.get_name():
        translator = MicrosoftTranslator()
    else:
        raise NotImplementedError(_("Translation service '{}' is not supported yet.").format(translator_name))
    record_time(_("Initialize translator"), time.time() - stage_start_time)

    def internal_stream_callback(chunk):
        if stream_callback: stream_callback(chunk)
        tick_time()

    # Generate output path: strip any existing trailing language code from
    # the base name, then append the target locale before the extension.
    base, ext = os.path.splitext(srt_path)
    LOCALE_MAP = {"zh": "zh-CN", "en": "en-US", "ja": "ja-JP", "ko": "ko-KR", "fr": "fr-FR", "de": "de-DE", "ru": "ru-RU"}
    target_locale = LOCALE_MAP.get(target_lang, target_lang)
    # Anchored with '$' so only a trailing code like ".en" or ".zh-CN" is
    # removed. An unanchored pattern would strip every dot-lowercase piece
    # anywhere in the path (e.g. a directory named "app.bin"), corrupting
    # the output path.
    lang_code_pattern = re.compile(r'\.([a-z]{2,3}(-[A-Z]{2})?)$')
    base = lang_code_pattern.sub('', base)
    output_path = f"{base}.{target_locale}{ext}"

    # Intelligent Segmentation Workflow (OpenAI only): translate batches
    # concurrently, then rebuild the SRT with approximated timings.
    if app_config.get('translation.enable_intelligent_segmentation') and isinstance(translator, OpenAITranslator):
        progress(_("Starting translation with Intelligent Segmentation (Concurrent)..."))
        stage_start_time = time.time()

        original_batches = _create_dynamic_batches(parser.subtitles, logger)
        total_batches = len(original_batches)

        # model_config may be None (its default) — don't crash while looking
        # up the per-service rate limit in that case.
        service_config = app_config.get_service_config_by_url((model_config or {}).get("base_url"))
        rpm_limit = service_config.get("rpm_limit", 5) if service_config else 5
        max_workers = rpm_limit
        logger.info(_("Concurrency limit set to {}").format(max_workers))

        translated_batches = [None] * total_batches

        if batch_start_callback: batch_start_callback(0, total_batches)

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_index = {}
            for i, batch in enumerate(original_batches):
                if is_stopped(): raise InterruptedError("Translation was cancelled by user.")
                future = executor.submit(
                    translator.segment_and_translate,
                    texts=[sub.text for sub in batch],
                    source_lang=source_lang,
                    target_lang=target_lang,
                    model_config=model_config,
                    translation_style=translation_style,
                    video_type=video_type,
                    stream_callback=None, # Stream callback is not practical for concurrent batches
                    is_cancelled=is_stopped
                )
                future_to_index[future] = i

            processed_count = 0
            for future in as_completed(future_to_index):
                if is_stopped(): raise InterruptedError("Translation was cancelled by user.")
                index = future_to_index[future]
                try:
                    result = future.result()
                    # Enforce max segment length before timing is computed.
                    sanitized_batch = _split_long_segments(segments=result, logger=logger)
                    translated_batches[index] = sanitized_batch
                    progress(_("Successfully processed batch for original subtitles starting at #{}").format(original_batches[index][0].index))
                except Exception as e:
                    logger.error(f"Batch {index} failed: {e}")
                    translated_batches[index] = [] # Store empty result on failure

                processed_count += 1
                if batch_done_callback: batch_done_callback(processed_count, total_batches)
                tick_time()

        record_time(_("Translate & Segment Text"), time.time() - stage_start_time)
        progress(_("All batches segmented and translated."))

        if all_done_callback:
            original_texts = [sub.text for sub in parser.subtitles]
            flat_translated_texts = [text for batch in translated_batches for text in batch]
            # This callback expects a flat list of pairs, which doesn't perfectly match the new structure.
            # We provide a simplified version for preview purposes.
            all_done_callback(list(zip(original_texts, flat_translated_texts + [""] * (len(original_texts) - len(flat_translated_texts)))))

        progress(_("Generating new segmented SRT file..."))
        stage_start_time = time.time()
        _write_segmented_srt(output_path, original_batches, translated_batches, logger)
        record_time(_("Generate Segmented SRT"), time.time() - stage_start_time)

    # Original Workflow: translate line by line, then rebalance segments.
    else:
        progress(_("Starting standard translation..."))
        stage_start_time = time.time()

        original_texts = parser.get_texts()

        def internal_batch_callback(current_batch, total_batches, batch_size=0):
            # Abort promptly between batches if the user cancelled.
            if is_stopped(): raise InterruptedError("Translation was cancelled by user.")
            if batch_done_callback: batch_done_callback(current_batch, total_batches)
            tick_time()

        translate_args = {
            "texts": original_texts,
            "source_lang": source_lang,
            "target_lang": target_lang,
            "video_type": video_type,
            "translation_style": translation_style,
            "batch_start_callback": batch_start_callback,
            "batch_done_callback": internal_batch_callback,
            "stream_callback": internal_stream_callback,
            "is_cancelled": is_stopped
        }
        # Only the OpenAI translator accepts model_config / no_think.
        if translator_name == OpenAITranslator.get_name():
            if model_config is not None: translate_args["model_config"] = model_config
            translate_args["no_think"] = no_think

        translated_texts = translator.translate(**translate_args)
        record_time(_("Translate text"), time.time() - stage_start_time)

        if is_stopped(): return None

        progress(_("Translation finished, got {} translated lines.").format(len(translated_texts)))
        if len(original_texts) != len(translated_texts):
            progress(_("[Warning] Mismatch between original and translated text count! Original: {}, Translated: {}").format(len(original_texts), len(translated_texts)))

        stage_start_time = time.time()
        balanced_texts = _balance_translated_segments(translated_texts, target_lang, logger)
        record_time(_("Balance Segments"), time.time() - stage_start_time)

        if all_done_callback:
            all_done_callback(list(zip(original_texts, balanced_texts)))

        progress(_("All batches translated, generating new SRT file..."))
        stage_start_time = time.time()
        parser.write(output_path, balanced_texts)
        record_time(_("Generate SRT file"), time.time() - stage_start_time)

    progress(_("Translation successful! Output file: {}").format(output_path))
    total_elapsed_time = time.time() - total_start_time
    record_time(_("Total time"), total_elapsed_time)

    return output_path

class InterruptedError(Exception):
    """Raised internally when the user cancels a translation task.

    NOTE(review): this shadows Python's builtin ``InterruptedError``
    (an ``OSError`` subclass); within this module, ``raise
    InterruptedError(...)`` refers to this class. Renaming would be
    cleaner but could break callers that catch this exact class.
    """
    pass

class TranslationWorker(BaseWorker):
    """Qt worker that runs translate_srt_core on a thread and relays progress via signals."""

    # (elapsed_seconds, reserved) — periodic wall-clock updates while running
    time_updated = Signal(float, float)
    # A streamed chunk of translated text (standard workflow only)
    translation_chunk = Signal(str)
    # List of (original, translated) pairs once all batches finish
    translation_done = Signal(list)
    # (current_batch, total_batches) when batch processing starts
    batch_started = Signal(int, int)
    # (completed_count, total_batches) after each finished batch
    batch_completed = Signal(int, int)
    # (stage_name, elapsed_seconds) per-stage timing breakdown
    timing_updated = Signal(str, float)
    # (message, output_path) emitted once on success
    task_finished = Signal(str, str)

    def __init__(self, srt_path, translator_name, source_lang, target_lang, model_config=None, video_type="常规", translation_style="常规", no_think=False, logger=None):
        """Store task parameters; the actual work happens in core_run()."""
        super().__init__()
        self.output_path = None  # set by core_run() on success
        self.srt_path = srt_path
        self.translator_name = translator_name
        self.source_lang = source_lang
        self.target_lang = target_lang
        self.model_config = model_config
        self.video_type = video_type
        self.translation_style = translation_style
        self.no_think = no_think
        self.logger = logger or get_logger(__name__)

    def core_run(self):
        """Run the translation pipeline, wiring core callbacks to Qt signals.

        Emits task_finished on success, error on failure; a user-requested
        interruption produces neither (only a progress message).
        """
        try:
            self.output_path = translate_srt_core(
                self,
                self.srt_path,
                self.translator_name,
                self.source_lang,
                self.target_lang,
                model_config=self.model_config,
                video_type=self.video_type,
                translation_style=self.translation_style,
                no_think=self.no_think,
                progress_callback=self.progress.emit,
                stream_callback=self.translation_chunk.emit,
                batch_start_callback=self.batch_started.emit,
                batch_done_callback=self.batch_completed.emit,
                all_done_callback=self.translation_done.emit,
                timing_callback=self.timing_updated.emit,
                is_stopped_func=self.isInterruptionRequested,
                time_tick_callback=self.time_updated.emit,
                logger=self.logger
            )
            
            # User-initiated stop: report quietly, emit no success/error.
            if self.isInterruptionRequested():
                self.progress.emit(_("Translation was manually stopped by the user."))
                return

            if self.output_path:
                self.task_finished.emit(_("Translation successful!"), self.output_path)
            else:
                # No path and not interrupted means the core silently failed.
                if not self.isInterruptionRequested():
                    raise Exception(_("Translation core logic did not return an output path and did not report an error."))
        except TranslationError as e:
            # Surface to the UI, then re-raise so BaseWorker sees the failure.
            self.error.emit(str(e))
            raise
        except Exception as e:
            self.error.emit(_("An unknown error occurred: {}").format(e))
            raise

    def stop(self):
        """Request interruption of the running translation task."""
        self.requestInterruption()
        self.progress.emit(_("Stopping translation task..."))
        super().stop()
        
    def get_output_path(self):
        """Return the output SRT path, or None if the task has not succeeded."""
        return self.output_path
