import os
import re
import sys
import asyncio
import tempfile
import shutil
import time
from concurrent.futures import ThreadPoolExecutor
from PySide6.QtCore import Signal
from src.ui.base_worker import BaseWorker
from src.utils.srt_parser import SRTParser
from src.translation.edge_tts import EdgeTTS
# Import other TTS services here as they are created
from src.translation.base_tts import BaseTTS
from src.utils.process import ProcessManager
from src.utils.logger import get_logger
from src.ui.i18n import _


# Registry of available TTS backends, keyed by the display name each service
# reports via its get_name() classmethod. New services plug in by adding an
# entry here; _get_tts_service() resolves names against this dict.
SUPPORTED_TTS_SERVICES = {
    EdgeTTS.get_name(): EdgeTTS,
    # Add other services here, e.g., "GoogleTTS": GoogleTTS
}

def _get_tts_service(service_name: str) -> BaseTTS:
    """Look up *service_name* in the registry and return a fresh instance.

    Raises:
        NotImplementedError: if no TTS backend is registered under that name.
    """
    try:
        service_cls = SUPPORTED_TTS_SERVICES[service_name]
    except KeyError:
        raise NotImplementedError(f"{_('TTS service')} '{service_name}' {_('is not supported yet.')}") from None
    return service_cls()


class CancelledError(Exception):
    """Raised internally when the user aborts the dubbing task mid-run."""
    pass

# This is a helper class to pass context and callbacks through the core functions
class DubbingContext:
    """Bundle of job parameters, callbacks and shared services threaded
    through the core dubbing functions, so they stay UI-agnostic."""

    def __init__(self, srt_path, tts_service_name, voice, locale, work_dir,
                 logger, ui_log_callback=None, clips_dir=None, concurrent_tasks=10, is_stopped_func=None, start_time=None,
                 process_manager=None, preserve_audio_duration=False):
        # Plain job parameters.
        self.srt_path = srt_path
        self.tts_service_name = tts_service_name
        self.voice = voice
        self.locale = locale
        self.work_dir = work_dir
        self.concurrent_tasks = concurrent_tasks
        self.preserve_audio_duration = preserve_audio_duration

        # Clips live in their own directory when given, else alongside the
        # other work files; make sure it exists up front.
        self.clips_dir = clips_dir or work_dir
        os.makedirs(self.clips_dir, exist_ok=True)

        # Callbacks / services, with no-op or lazy defaults.
        self.logger = logger
        self.ui_log_callback = ui_log_callback or (lambda msg: None)
        self._is_stopped_func = is_stopped_func
        self.process_manager = process_manager or ProcessManager()
        self.start_time = start_time or time.time()

    def is_stopped(self):
        """Truthy once the caller has requested cancellation."""
        stopped = self._is_stopped_func
        return stopped and stopped()

    def progress(self, message):
        """Send a progress update to the main UI thread and log to file."""
        self.logger.info(message)
        self.ui_log_callback(message)

    def log(self, message):
        """Send a debug/verbose log message to the file only.

        Debug chatter is deliberately kept off the UI; only progress()
        messages are surfaced there.
        """
        self.logger.debug(message)


def _get_audio_duration(file_path, logger):
    """Return the duration of *file_path* in seconds, as reported by ffprobe."""
    # ffprobe's output must be captured, so subprocess.run is used directly
    # (rather than the project's ProcessManager); the command is still logged.
    import subprocess

    cmd = [
        "ffprobe", "-v", "error", "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1", file_path,
    ]
    # Suppress the console window that Windows would otherwise open for the child.
    creation_flags = subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0
    logger.debug(f"Executing: {' '.join(cmd)}")
    completed = subprocess.run(cmd, capture_output=True, text=True, check=True,
                               creationflags=creation_flags)
    return float(completed.stdout)

async def _generate_audio_clips(ctx, subtitles):
    """Concurrently generate all audio clips, limiting concurrency to avoid too many open files.

    Writes one MP3 per subtitle to ctx.clips_dir as clip_NNNN.mp3. Empty
    subtitle text — and text that still fails after all TTS retries — is
    replaced by a silent clip of the subtitle's duration so the timeline
    stays intact.

    Raises:
        CancelledError: if the user requested a stop.
        RuntimeError: if some clips could not be generated at all.
    """
    if ctx.is_stopped(): raise CancelledError()

    total_clips = len(subtitles)

    # --- TTS Cache Check ---
    # If every expected clip already exists and is non-empty, skip TTS
    # entirely — makes re-running a previously interrupted job cheap.
    ctx.progress(_("Checking for existing audio clips..."))
    all_clips_exist = True
    for i in range(total_clips):
        output_file = os.path.join(ctx.clips_dir, f"clip_{i:04d}.mp3")
        if not os.path.exists(output_file) or os.path.getsize(output_file) == 0:
            all_clips_exist = False
            break

    if all_clips_exist:
        ctx.progress(_("All {total_clips} audio clips already exist. Skipping TTS generation.").format(total_clips=total_clips))
        return
    # --- End Cache Check ---

    try:
        tts_service = _get_tts_service(ctx.tts_service_name)
    except NotImplementedError as e:
        raise e

    ctx.log(_("Generating audio with {concurrent_tasks} parallel tasks.").format(concurrent_tasks=ctx.concurrent_tasks))
    # Bound concurrency so we neither exhaust file descriptors nor hammer the TTS service.
    semaphore = asyncio.Semaphore(ctx.concurrent_tasks)

    async def generate_clip(i, sub):
        # Returns None on success, or a human-readable error string; the
        # collected strings are raised as one RuntimeError after gather().
        async with semaphore:
            if ctx.is_stopped(): return _("Task {i_plus_1} was cancelled.").format(i_plus_1=i+1)

            ctx.log(_("Generating clip {i_plus_1}/{total_clips}").format(i_plus_1=i+1, total_clips=total_clips))
            output_file = os.path.join(ctx.clips_dir, f"clip_{i:04d}.mp3")
            max_retries = 3

            # Empty text cannot be synthesized — emit silence of the same length instead.
            if not sub.text.strip():
                ctx.log(_("Task {i_plus_1}: Skipping empty text, generating {duration:.2f}s of silence.").format(i_plus_1=i+1, duration=sub.duration))
                command = [
                    "ffmpeg", "-f", "lavfi", "-i", f"anullsrc=r=48000:cl=mono",
                    "-t", str(sub.duration), "-c:a", "libmp3lame", "-q:a", "9", "-y", output_file
                ]
                ctx.process_manager.run(command, log_callback=ctx.log)
                return None

            for attempt in range(max_retries):
                if ctx.is_stopped(): return _("Task {i_plus_1} was cancelled during retry.").format(i_plus_1=i+1)
                try:
                    # Use f-string for safer formatting
                    debug_msg = f"{_('Task')} {i+1} ({_('Attempt')} {attempt+1}/{max_retries}): voice='{ctx.voice}', text='{sub.text[:30]}...'"
                    ctx.log(debug_msg)

                    # Generate audio at normal speed. Speed adjustments will be handled by ffmpeg.
                    await tts_service.save(sub.text, output_file, ctx.voice)

                    # A zero-byte file counts as a failure and triggers a retry.
                    if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
                        return None
                    else:
                        raise ValueError(_("TTS service did not return valid audio data."))
                except Exception as e:
                    # Use f-string for safer formatting
                    ctx.progress(f"⚠️ {_('Task')} {i+1} {_('attempt')} {attempt+1} {_('failed')}: {e}")
                    if attempt < max_retries - 1:
                        # Brief back-off before the next attempt.
                        await asyncio.sleep(1)
                    else:
                        # Use f-string for safer formatting, especially for the error 'e'
                        error_msg = f"❌ {_('Task')} {i+1} {_('failed after')} {max_retries} {_('attempts')} (text: '{sub.text[:30]}...'): {e}"
                        ctx.progress(error_msg)
                        ctx.progress(_("Generating {duration:.2f}s of silence for task {i_plus_1} as a fallback.").format(duration=sub.duration, i_plus_1=i+1))

                        # Last resort: silence of the subtitle's duration keeps the timeline valid.
                        command = [
                            "ffmpeg", "-f", "lavfi", "-i", f"anullsrc=r=48000:cl=mono",
                            "-t", str(sub.duration), "-c:a", "libmp3lame", "-q:a", "9", "-y", output_file
                        ]
                        ctx.process_manager.run(command, log_callback=ctx.log)
                        return None
            return _("Task {i_plus_1} failed to execute, this should not happen.").format(i_plus_1=i+1)

    tasks = [generate_clip(i, sub) for i, sub in enumerate(subtitles)]

    results = await asyncio.gather(*tasks)

    if ctx.is_stopped(): raise CancelledError()

    # Any non-None result is an error message from an individual task.
    errors = [res for res in results if res is not None]
    if errors:
        combined_errors = "\n".join(errors)
        # Use f-string for safer formatting
        raise RuntimeError(f"{_('Error generating some audio clips')}:\n{combined_errors}")

def _concatenate_audio_naturally(ctx, subtitles):
    """Concatenates audio clips sequentially without any tempo modification."""
    ctx.progress(_("Starting natural audio concatenation..."))
    
    # 1. Create a list of all existing clip files
    concat_list_path = os.path.join(ctx.work_dir, "concat_list.txt")
    with open(concat_list_path, 'w', encoding='utf-8') as f:
        for i in range(len(subtitles)):
            clip_path = os.path.join(ctx.clips_dir, f"clip_{i:04d}.mp3")
            if os.path.exists(clip_path) and os.path.getsize(clip_path) > 0:
                f.write(f"file '{os.path.abspath(clip_path)}'\n")
            else:
                ctx.logger.warning(f"Audio clip {clip_path} is missing or empty, it will be skipped.")

    # 2. Define output path
    base_name = os.path.splitext(os.path.basename(ctx.srt_path))[0]
    output_dir = os.path.dirname(ctx.srt_path)
    lang_code_pattern = re.compile(r'\.([a-z]{2,3}(-[A-Z]{2})?)$')
    base_name = lang_code_pattern.sub('', base_name)
    output_path = os.path.join(output_dir, f"{base_name}.{ctx.locale}.m4a")

    # 3. Run ffmpeg concat demuxer
    command = [
        "ffmpeg", "-y",
        "-f", "concat",
        "-safe", "0",
        "-i", concat_list_path,
        "-c:a", "aac", "-b:a", "192k",
        output_path
    ]
    ctx.process_manager.run(command, log_callback=ctx.log)

    # 4. Cleanup
    os.remove(concat_list_path)

    ctx.progress(_("Natural audio concatenation complete."))
    return output_path

def _concatenate_audio(ctx, subtitles):
    """
    Final audio mixing, using batch processing to avoid "too many open files" errors.
    Employs a dynamic timeline strategy and time-scales excessively long audio clips to ensure total duration matches.

    Pipeline: measure clip durations -> plan a tempo per clip (merging
    overly-fast neighbours) -> harmonize pace -> compute a non-overlapping
    timeline -> mix in batches of 30 via ffmpeg -> merge the batches.

    Returns:
        The path of the generated .m4a file, placed next to the SRT.

    Raises:
        CancelledError: if the user requested a stop.
        RuntimeError: if any expected clip file is missing or empty.
    """
    total_clips = len(subtitles)

    # --- 1. Prepare file list and fetch audio durations in parallel ---
    ctx.progress(_("Preparing audio files..."))
    all_audio_files = []
    for i in range(total_clips):
        file_path = os.path.join(ctx.clips_dir, f"clip_{i:04d}.mp3")
        if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
            raise RuntimeError(f"{_('Missing or empty audio file')}: {file_path}.")
        all_audio_files.append(file_path)

    # ffprobe is I/O bound, so a thread pool parallelizes it well.
    ctx.progress(_("Calculating audio durations..."))
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        # Pass the logger object, not the callback function
        durations = list(executor.map(lambda p: _get_audio_duration(p, ctx.logger), all_audio_files))

    if ctx.is_stopped(): raise CancelledError()

    # --- 2. Plan tempo for each clip with merging ---
    # tempo = audio_duration / subtitle_duration; > 1 means the speech is
    # longer than its subtitle slot and must be sped up.
    ctx.progress(_("Planning audio tempo strategy..."))

    MERGE_TEMPO_THRESHOLD = 1.20
    ctx.log(_("Using a tempo threshold of {threshold:.2f} to merge fast clips.").format(threshold=MERGE_TEMPO_THRESHOLD))

    planned_clips = []
    i = 0
    while i < total_clips:
        if ctx.is_stopped(): raise CancelledError()

        subtitle_duration = subtitles[i].duration
        audio_duration = durations[i]

        # Degenerate subtitle timing: play the clip at normal speed.
        if subtitle_duration <= 0:
            planned_clips.append({ 'tempo': 1.0, 'input_file': all_audio_files[i], 'original_duration': audio_duration, 'srt_start_ms': subtitles[i].start_ms })
            i += 1
            continue

        individual_tempo = audio_duration / subtitle_duration

        # A clip that would need a big speed-up may instead borrow time from
        # its neighbour: both clips share one gentler combined tempo.
        if individual_tempo > MERGE_TEMPO_THRESHOLD and (i + 1) < total_clips:
            ctx.log(_("Clip {i_plus_1}: Tempo {tempo:.2f} exceeds threshold. Merging with next clip.").format(i_plus_1=i+1, tempo=individual_tempo))

            j = i + 1
            total_audio_duration = audio_duration + durations[j]
            total_subtitle_duration = subtitle_duration + subtitles[j].duration
            combined_tempo = total_audio_duration / total_subtitle_duration if total_subtitle_duration > 0 else 1.0

            # Clamp to 0.5–2.0, the range a single ffmpeg atempo filter accepts.
            if combined_tempo > 2.0: combined_tempo = 2.0
            if combined_tempo < 0.5: combined_tempo = 0.5
            ctx.log(_("Processing merged clips {i_plus_1}-{j_plus_1} with combined tempo {tempo:.2f}.").format(i_plus_1=i+1, j_plus_1=j+1, tempo=combined_tempo))

            planned_clips.append({ 'tempo': combined_tempo, 'input_file': all_audio_files[i], 'original_duration': audio_duration, 'srt_start_ms': subtitles[i].start_ms })
            planned_clips.append({ 'tempo': combined_tempo, 'input_file': all_audio_files[j], 'original_duration': durations[j], 'srt_start_ms': subtitles[j].start_ms })
            i += 2
        else:
            tempo = individual_tempo
            # Clamp to ffmpeg atempo's supported 0.5–2.0 range.
            if tempo > 2.0: tempo = 2.0
            if tempo < 0.5: tempo = 0.5
            if abs(tempo - 1.0) > 0.01:
                ctx.log(_("Clip {i_plus_1}: Processing with tempo {tempo:.2f}.").format(i_plus_1=i+1, tempo=tempo))
            planned_clips.append({ 'tempo': tempo, 'input_file': all_audio_files[i], 'original_duration': audio_duration, 'srt_start_ms': subtitles[i].start_ms })
            i += 1

    # --- 3. Harmonize audio pace for consistency ---
    # Avoid a jarring mix of fast and slow speech: slow clips are pulled up
    # toward the average pace of the clips that had to be sped up.
    ctx.progress(_("Harmonizing audio pace..."))

    # Calculate the average tempo of all clips that were significantly sped up.
    fast_tempos = [c['tempo'] for c in planned_clips if c['tempo'] > 1.05]

    if fast_tempos:
        avg_fast_tempo = sum(fast_tempos) / len(fast_tempos)
        ctx.log(_("Average speed-up tempo is {avg_fast_tempo:.2f}. Adjusting slow clips to match this pace.").format(avg_fast_tempo=avg_fast_tempo))

        # This factor determines how much slower a clip can be than the average before it gets sped up.
        # e.g., 0.85 means clips slower than 85% of the avg fast tempo will be adjusted.
        HARMONIZATION_THRESHOLD_FACTOR = 0.85

        for i, clip in enumerate(planned_clips):
            # If a clip is significantly slower than the average pace, adjust it.
            if clip['tempo'] < avg_fast_tempo * HARMONIZATION_THRESHOLD_FACTOR:
                original_tempo = clip['tempo']
                # Bring the slow clip up to the average speed
                clip['tempo'] = avg_fast_tempo
                ctx.log(_("Clip {i_plus_1}: Harmonizing tempo from {original_tempo:.2f} to {new_tempo:.2f} to match average pace.").format(i_plus_1=i + 1, original_tempo=original_tempo, new_tempo=clip['tempo']))

        # Re-clamp tempo values after harmonization to ensure they are within safe limits
        for clip in planned_clips:
            if clip['tempo'] > 2.0:
                clip['tempo'] = 2.0
            if clip['tempo'] < 0.5:
                clip['tempo'] = 0.5

    if ctx.is_stopped(): raise CancelledError()

    # --- 4. Calculate final timeline to prevent overlaps ---
    # Walk the clips in order, pushing each start time forward if the
    # previous (time-scaled) clip would still be playing.
    ctx.progress(_("Calculating final timeline..."))
    clip_data = []
    timeline_cursor_ms = 0
    for i, clip in enumerate(planned_clips):
        if ctx.is_stopped(): raise CancelledError()
        ctx.log(_("Calculating timeline for clip {i_plus_1}/{total_clips}").format(i_plus_1=i+1, total_clips=total_clips))

        # Duration after tempo scaling (atempo shortens by the tempo factor).
        new_duration_ms = (clip['original_duration'] / clip['tempo']) * 1000

        # The clip must start either at its SRT time or after the previous clip has finished, whichever is later.
        actual_start_ms = max(clip['srt_start_ms'], timeline_cursor_ms)

        clip_data.append({
            'actual_start_ms': actual_start_ms,
            'tempo': clip['tempo'],
            'input_file': clip['input_file']
        })

        # Update the cursor to the end time of the current clip
        timeline_cursor_ms = actual_start_ms + new_duration_ms
        ctx.log(_("Clip {i_plus_1}: Scheduled to start at {start_time:.2f}s.").format(i_plus_1=i+1, start_time=actual_start_ms / 1000))

    if ctx.is_stopped(): raise CancelledError()

    # --- 5. Batch Processing ---
    # Mix at most `batch_size` clips per ffmpeg invocation so we never pass
    # hundreds of -i inputs at once ("too many open files").
    intermediate_files = []
    batch_size = 30
    num_batches = (total_clips + batch_size - 1) // batch_size

    for i in range(num_batches):
        if ctx.is_stopped(): raise CancelledError()

        batch_start_index = i * batch_size
        batch_end_index = min((i + 1) * batch_size, total_clips)

        batch_clip_data = clip_data[batch_start_index:batch_end_index]

        if not batch_clip_data:
            continue

        ctx.log(_("Merging batch {i_plus_1}/{num_batches}").format(i_plus_1=i+1, num_batches=num_batches))

        # Delay within a batch should be relative to the batch start time
        batch_start_time_ms = batch_clip_data[0]['actual_start_ms']

        input_files_args = []
        for data in batch_clip_data:
            input_files_args.extend(["-i", data['input_file']])

        filter_complex_parts = []
        amix_inputs = []

        # Per clip: [j:a] -> atempo/anull -> [aj] -> adelay -> [fj], then amix all [fj].
        for j, data in enumerate(batch_clip_data):
            input_stream = f"[{j}:a]"
            processed_stream = f"[a{j}]"
            final_stream = f"[f{j}]"

            # 1. Apply speed adjustment (tempo) if needed
            if data['tempo'] != 1.0:
                atempo_filter = f"{input_stream}atempo={data['tempo']}{processed_stream}"
            else:
                atempo_filter = f"{input_stream}anull{processed_stream}" # anull is a no-op filter

            # 2. Apply delay relative to the batch start time using the calculated actual_start_ms
            relative_delay_ms = data['actual_start_ms'] - batch_start_time_ms
            if relative_delay_ms < 0:
                # This can happen if a batch starts mid-way through a long clip from a previous batch.
                # The delay should be 0 in this context.
                relative_delay_ms = 0

            # adelay takes one delay value per channel ("ms|ms" covers stereo).
            adelay_filter = f"{processed_stream}adelay={int(relative_delay_ms)}|{int(relative_delay_ms)}{final_stream}"

            filter_complex_parts.append(atempo_filter)
            filter_complex_parts.append(adelay_filter)
            amix_inputs.append(final_stream)

        # normalize=0 keeps each clip's original volume instead of amix's default attenuation.
        filter_complex_parts.append(f"{''.join(amix_inputs)}amix=inputs={len(batch_clip_data)}:duration=longest:normalize=0[aout]")
        filter_complex_str = "; ".join(filter_complex_parts)

        intermediate_output = os.path.join(ctx.clips_dir, f"intermediate_{i}.m4a")
        intermediate_files.append(intermediate_output)

        # Use filter_complex_script to avoid overly long command lines
        filter_script_path = None
        try:
            with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix='.txt', encoding='utf-8', dir=ctx.clips_dir) as filter_file:
                filter_file.write(filter_complex_str)
                filter_script_path = filter_file.name

            command = [
                "ffmpeg", *input_files_args, "-filter_complex_script", filter_script_path,
                "-map", "[aout]", "-c:a", "aac", "-b:a", "192k", "-y", intermediate_output
            ]
            ctx.process_manager.run(command, log_callback=ctx.log)
        finally:
            # Clean up temporary filter file
            if filter_script_path and os.path.exists(filter_script_path):
                os.remove(filter_script_path)

    if ctx.is_stopped(): raise CancelledError()

    # --- 6. Merge final files ---
    ctx.progress(_("Merging all audio clips..."))

    # Use the srt filename as the output filename, and place it next to the srt file
    base_name = os.path.splitext(os.path.basename(ctx.srt_path))[0]
    output_dir = os.path.dirname(ctx.srt_path)

    # Regex to find and remove existing language codes (e.g., .en, .zh-CN)
    lang_code_pattern = re.compile(r'\.([a-z]{2,3}(-[A-Z]{2})?)$')
    base_name = lang_code_pattern.sub('', base_name)

    output_path = os.path.join(output_dir, f"{base_name}.{ctx.locale}.m4a")

    if len(intermediate_files) == 1:
        # Single batch: no second mix needed, just move it into place.
        shutil.move(intermediate_files[0], output_path)
    elif len(intermediate_files) > 1:
        # Each intermediate file starts at time 0. Now we need to apply the absolute start time delay of its batch to each file, then mix.
        final_input_args = [arg for path in intermediate_files for arg in ("-i", path)]

        filter_complex_parts = []
        amix_inputs = []
        for i, path in enumerate(intermediate_files):
            batch_start_index = i * batch_size
            # The absolute start time of this batch
            batch_start_time_ms = clip_data[batch_start_index]['actual_start_ms']

            input_stream = f"[{i}:a]"
            delayed_stream = f"[d{i}]"

            # Apply absolute delay to the entire intermediate file
            filter_complex_parts.append(f"{input_stream}adelay={batch_start_time_ms}|{batch_start_time_ms}{delayed_stream}")
            amix_inputs.append(delayed_stream)

        filter_complex_parts.append(f"{''.join(amix_inputs)}amix=inputs={len(intermediate_files)}:duration=longest:normalize=0[aout]")
        filter_complex_str = "; ".join(filter_complex_parts)

        command = [
            "ffmpeg", *final_input_args, "-filter_complex", filter_complex_str,
            "-map", "[aout]", "-c:a", "aac", "-b:a", "192k", "-y", output_path
        ]
        ctx.process_manager.run(command, log_callback=ctx.log)

    # Clean up intermediate files
    for path in intermediate_files:
        try:
            os.remove(path)
        except OSError as e:
            ctx.progress(f"⚠️ {_('Could not delete intermediate file')} {path}: {e}")

    ctx.progress(_("Merging complete."))
    return output_path


def dub_srt_core(ctx: DubbingContext):
    """Core logic for dubbing: parse the SRT, synthesize clips, assemble the track.

    Returns:
        The path of the generated audio file.

    Raises:
        CancelledError: if the user requested a stop at any checkpoint.
    """
    ctx.progress(_("Starting to parse SRT file for dubbing..."))
    parser = SRTParser(ctx.srt_path)
    parser.parse()
    subtitles = parser.subtitles
    ctx.progress(_("File parsed successfully, found {num_subtitles} subtitles.").format(num_subtitles=len(subtitles)))

    if ctx.is_stopped(): raise CancelledError()

    # asyncio.run starts its own event loop, so this function must be called
    # from a thread that is not already running one (e.g. the worker thread).
    asyncio.run(_generate_audio_clips(ctx, subtitles))

    if ctx.is_stopped(): raise CancelledError()

    if ctx.preserve_audio_duration:
        # Simple back-to-back concatenation; timing may drift from the subtitles.
        ctx.logger.info("Preserving natural audio duration, using simple concatenation.")
        output_path = _concatenate_audio_naturally(ctx, subtitles)
    else:
        # Time-scale and schedule clips so speech lines up with the subtitle timeline.
        ctx.logger.info("Applying tempo adjustments to match subtitle timing.")
        output_path = _concatenate_audio(ctx, subtitles)

    ctx.progress(_("Dubbing successful!"))
    return output_path

class DubbingWorker(BaseWorker):
    """
    Executes the dubbing task in the background.

    Builds a DubbingContext and runs dub_srt_core on the worker thread,
    forwarding log output and the final result to the UI via Qt signals.
    """
    # Free-form log line for the UI console.
    log_message = Signal(str)
    progress_update = Signal(int, int, str) # value, total, stage
    task_finished = Signal(str, str) # success message, output file path

    def __init__(self, srt_path, tts_service_name, voice, locale, concurrent_tasks=10):
        """Store the job parameters; no work happens until core_run() executes."""
        super().__init__()
        self.srt_path = srt_path
        self.tts_service_name = tts_service_name
        self.voice = voice
        self.locale = locale
        self.concurrent_tasks = concurrent_tasks
        self.process_manager = ProcessManager()
        self.logger = get_logger(__name__)

    def core_run(self):
        """Worker-thread entry point invoked by BaseWorker."""
        # Imported lazily to avoid touching app config at module-import time.
        from src.config import app_config
        # Per-SRT working directory under the configured workspace, so clip
        # caches from different jobs don't collide.
        srt_name = os.path.splitext(os.path.basename(self.srt_path))[0]
        workspace_root = app_config.get('paths.workspace', 'trans')
        work_dir = os.path.join(workspace_root, 'dubbing_clips', srt_name)
        os.makedirs(work_dir, exist_ok=True)

        ctx = DubbingContext(
            srt_path=self.srt_path,
            tts_service_name=self.tts_service_name,
            voice=self.voice,
            locale=self.locale,
            work_dir=work_dir,
            logger=self.logger,
            ui_log_callback=self.log_message.emit, # Pass the emit method as a callback
            concurrent_tasks=self.concurrent_tasks,
            is_stopped_func=self.isInterruptionRequested,
            process_manager=self.process_manager
        )

        output_path = dub_srt_core(ctx)

        if self.isInterruptionRequested():
            # Defensive guard: cancellation normally surfaces as a CancelledError
            # raised inside dub_srt_core (and caught by BaseWorker.run); if we
            # got here anyway, just avoid emitting the success signal.
            return

        if output_path:
            self.task_finished.emit(_("Dubbing successful!"), output_path)
        else:
            # If there's no output path but also no exception, it's likely a logic error
            if not self.isInterruptionRequested():
                raise RuntimeError(_("Dubbing core task finished but did not return an output path."))