import os
import tempfile
import shutil
import sys
import functools
import subprocess
import json
import re
import srt  # the 'srt' subtitle-parsing library (not pysrt)

from src.ui.i18n import _
from PySide6.QtCore import Signal

from src.config import app_config
from src.ui.base_worker import BaseWorker
from src.processing.vocal_remover import VocalRemover
from src.utils.formatter import get_track_title, get_language_code_from_path
from src.utils.logger import setup_logger, get_logger
from src.utils.process import ProcessManager


@functools.lru_cache(maxsize=None)
def _is_encoder_available(encoder, logger):
    """Check if a specific ffmpeg encoder is available."""
    logger.debug(f"Checking for ffmpeg encoder: {encoder}")
    try:
        # Use subprocess.CREATE_NO_WINDOW on Windows to prevent console window from popping up
        creationflags = 0
        if sys.platform == 'win32':
            creationflags = subprocess.CREATE_NO_WINDOW

        result = subprocess.run(
            ["ffmpeg", "-hide_banner", "-encoders"],
            capture_output=True,
            text=True,
            check=True,
            encoding='utf-8',
            errors='replace',
            creationflags=creationflags
        )
        for line in result.stdout.splitlines():
            # Look for lines describing video encoders (they start with 'V')
            if line.strip().startswith('V'):
                parts = line.strip().split()
                if len(parts) > 1 and parts[1] == encoder:
                    logger.debug(f"Encoder '{encoder}' is available.")
                    return True
        logger.warning(f"Encoder '{encoder}' is not available.")
        return False
    except FileNotFoundError:
        logger.warning("ffmpeg not found. Cannot check for hardware encoders.")
        return False
    except subprocess.CalledProcessError as e:
        logger.warning(f"Error checking for ffmpeg encoders: {e}")
        return False

def _get_best_video_codec(logger):
    """Pick the best available H.264 encoder for the current platform.

    Probes platform-specific hardware encoders in order of preference and
    returns the first one ffmpeg reports; falls back to software 'libx264'.
    """
    if sys.platform == "darwin":
        candidates = [
            ("h264_videotoolbox", "Using 'h264_videotoolbox' for hardware acceleration on macOS."),
        ]
    elif sys.platform == "win32":
        candidates = [
            ("h264_nvenc", "Using 'h264_nvenc' for hardware acceleration on Windows (NVIDIA)."),
            ("h264_qsv", "Using 'h264_qsv' for hardware acceleration on Windows (Intel)."),
        ]
    elif sys.platform.startswith("linux"):
        candidates = [
            ("h264_nvenc", "Using 'h264_nvenc' for hardware acceleration on Linux (NVIDIA)."),
            ("h264_vaapi", "Using 'h264_vaapi' for hardware acceleration on Linux (AMD/Intel)."),
        ]
    else:
        candidates = []

    for codec, message in candidates:
        if _is_encoder_available(codec, logger):
            logger.info(message)
            return codec

    logger.info("No hardware acceleration available or detected, falling back to software encoding 'libx264'.")
    return "libx264"

def _get_media_duration(file_path, logger):
    """Return the duration of a media file in seconds, via ffprobe.

    Returns None when ffprobe fails or emits a non-numeric value.
    Re-raises FileNotFoundError when ffprobe itself is missing from PATH.
    """
    no_window = subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0
    probe_cmd = [
        "ffprobe",
        "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        file_path,
    ]
    try:
        completed = subprocess.run(probe_cmd, capture_output=True, text=True, check=True, creationflags=no_window)
    except FileNotFoundError:
        logger.error("ffprobe not found. Please ensure ffmpeg is installed and in your PATH.")
        raise
    except subprocess.CalledProcessError as e:
        logger.error(f"Error getting duration for {file_path}: {e}")
        return None
    try:
        return float(completed.stdout.strip())
    except ValueError as e:
        logger.error(f"Error getting duration for {file_path}: {e}")
        return None

def _parse_srt(srt_path):
    """Read an SRT file (UTF-8) and return its subtitles as a list."""
    with open(srt_path, 'r', encoding='utf-8') as handle:
        raw_text = handle.read()
    return list(srt.parse(raw_text))

# Helper classes and functions
class CompositionContext:
    """Shared state for the composition pipeline: input paths, options,
    work directory, logger and process manager."""

    def __init__(self, video_path, audio_files, subtitle_files, main_audio, main_subtitle, keep_original_audio, remove_vocals, subtitle_mode, extend_video_for_dub=False, generated_srt_path=None, target_lang=None, progress_callback=None, is_stopped_func=None, logger=None):
        self.video_path = video_path
        self.audio_files = audio_files or []
        self.subtitle_files = subtitle_files or []
        self.main_audio = main_audio
        self.main_subtitle = main_subtitle
        self.keep_original_audio = keep_original_audio
        self.remove_vocals = remove_vocals
        self.subtitle_mode = subtitle_mode
        self.extend_video_for_dub = extend_video_for_dub
        self.generated_srt_path = generated_srt_path
        self.target_lang = target_lang
        self.progress_callback = progress_callback
        # Default to "never stopped" when no stop-check callable is given.
        self.is_stopped = is_stopped_func or (lambda: False)

        base_name = os.path.splitext(os.path.basename(video_path))[0]

        # Infer the target language from the main dubbed file name when it
        # was not provided explicitly. Expected format: {video_name}.{lang}.m4a
        if not self.target_lang and self.main_audio and os.path.exists(self.main_audio):
            dub_stem = os.path.splitext(os.path.basename(self.main_audio))[0]
            prefix = base_name + '.'
            if dub_stem.startswith(prefix):
                self.target_lang = dub_stem[len(prefix):]

        # Per-project directory under the configured global workspace.
        self.work_dir = os.path.join(app_config.get('paths.workspace', 'trans'), base_name)
        os.makedirs(self.work_dir, exist_ok=True)

        self.logger = logger or get_logger(f'video_{base_name}')
        self.process_manager = ProcessManager()

        self.progress(_("Work directory: {}").format(self.work_dir))
        self.logger.debug(_("Work directory set to: {}").format(self.work_dir))

    def progress(self, msg):
        """Forward a message to the UI progress callback (if any) and log it."""
        if self.progress_callback:
            self.progress_callback(msg)
        self.logger.info(msg)

    def log(self, msg):
        """Debug-level logging helper for subprocess output."""
        self.logger.debug(msg)

    def execute_command(self, command, **kwargs):
        """Run an external command via the process manager.

        Raises InterruptedError if the user requested a stop beforehand.
        """
        if self.is_stopped():
            raise InterruptedError(_("Task was stopped by user."))
        self.progress(_("Executing: {}...").format(command[0]))
        self.process_manager.run(command, log_callback=self.log, **kwargs)

def compose_video_core(ctx):
    """Entry point: dispatch to the video-stretching or the normal composition path."""
    ctx.logger.info(f"[DEBUG] compose_video_core received extend_video_for_dub: {ctx.extend_video_for_dub}")
    if not ctx.extend_video_for_dub:
        return _compose_normally(ctx)
    return _compose_by_stretching_video(ctx)

def _compose_normally(ctx):
    """Compose the output video in a single ffmpeg pass.

    Maps the original video stream, adds the dubbed/extra audio tracks
    (optionally merging the main dub with an instrumental track produced
    by vocal separation), and embeds subtitles either burned-in ("Hard
    Subtitles") or as mov_text tracks ("Soft Subtitles").

    Returns the path of the composed MP4 inside ctx.work_dir.
    Raises ValueError when hard-subtitle mode is requested without any
    subtitle file, and InterruptedError if the user stops the task.
    """
    video_basename = os.path.splitext(os.path.basename(ctx.video_path))[0]

    # Auto-select the main audio/subtitle when none was chosen explicitly.
    if not ctx.main_audio and ctx.audio_files:
        ctx.main_audio = ctx.audio_files[0]
        ctx.log(_("No main audio specified, auto-selecting first: {}").format(ctx.main_audio))

    if not ctx.main_subtitle and ctx.subtitle_files:
        ctx.main_subtitle = ctx.subtitle_files[0]
        ctx.log(_("No main subtitle specified, auto-selecting first: {}").format(ctx.main_subtitle))

    # Derive the target language from the dubbed file name
    # ({video_name}.{lang}.{ext}) if it is still unknown.
    if not ctx.target_lang:
        if ctx.main_audio:
            dubbed_filename = os.path.basename(ctx.main_audio)
            dubbed_basename = os.path.splitext(dubbed_filename)[0]
            # BUGFIX: this previously referenced the undefined name
            # 'video_name' and raised NameError whenever this path ran.
            if dubbed_basename.startswith(video_basename + '.'):
                lang_part = dubbed_basename[len(video_basename) + 1:]
                ctx.target_lang = lang_part

    if not ctx.target_lang:
        ctx.target_lang = "composed"
        ctx.log(_("Could not determine target language, using default 'composed'"))

    output_filename = f"{video_basename}.{ctx.target_lang}.mp4"
    output_path = os.path.join(ctx.work_dir, output_filename)

    # Copy all audio inputs into the work directory so ffmpeg paths are local.
    local_audio_files = []
    for file_path in ctx.audio_files:
        if file_path and os.path.exists(file_path):
            local_path = os.path.join(ctx.work_dir, os.path.basename(file_path))
            if os.path.abspath(file_path) != os.path.abspath(local_path):
                shutil.copy(file_path, local_path)
            local_audio_files.append(local_path)
    ctx.log(_("Copied {} audio files to work directory.").format(len(local_audio_files)))

    # Copy subtitle inputs (deduplicated) and tag each with its language.
    subtitle_inputs = []
    processed_subtitle_paths = set()
    for file_path in ctx.subtitle_files:
        if file_path and os.path.exists(file_path) and file_path not in processed_subtitle_paths:
            local_path = os.path.join(ctx.work_dir, os.path.basename(file_path))
            if os.path.abspath(file_path) != os.path.abspath(local_path):
                shutil.copy(file_path, local_path)

            lang = get_language_code_from_path(local_path)
            subtitle_inputs.append({"path": local_path, "lang": lang, "is_main": local_path == ctx.main_subtitle})
            processed_subtitle_paths.add(file_path)
            ctx.log(_("Added subtitle for processing: {} (Language: {})").format(os.path.basename(local_path), lang))

    # Put the main subtitle (or the target-language one) first so it becomes
    # the default / burned-in track.
    if subtitle_inputs:
        if ctx.main_subtitle:
            main_sub_path_local = os.path.join(ctx.work_dir, os.path.basename(ctx.main_subtitle))
            subtitle_inputs.sort(key=lambda x: 0 if x['path'] == main_sub_path_local else 1)
        elif ctx.target_lang != "composed":
             subtitle_inputs.sort(key=lambda x: 0 if x['lang'] == ctx.target_lang else 1)
        ctx.log(_("Subtitle embedding order: {}").format([os.path.basename(s['path']) for s in subtitle_inputs]))

    # Optional vocal separation: extract the original audio, split it, and
    # keep the accompaniment to merge under the main dub track.
    accompaniment_path = None
    local_main_audio_path = os.path.join(ctx.work_dir, os.path.basename(ctx.main_audio)) if ctx.main_audio else None
    if ctx.remove_vocals and local_main_audio_path:
        ctx.progress(_("--- Starting vocal separation ---"))
        original_audio_path = os.path.join(ctx.work_dir, "original_audio.wav")
        extract_cmd = ["ffmpeg", "-i", ctx.video_path, "-vn", "-acodec", "pcm_s16le", "-ar", "44100", "-ac", "2", "-y", original_audio_path]
        ctx.execute_command(extract_cmd)
        remover = VocalRemover(
            process_manager=ctx.process_manager,
            progress_callback=ctx.log,
            work_dir=ctx.work_dir,
            is_stopped_func=ctx.is_stopped
        )
        accompaniment_path = remover.separate(original_audio_path)
        ctx.progress(_("Vocal separation complete"))

    # Build the ffmpeg input list: video first, then audio, accompaniment,
    # then subtitles. Input indices below depend on this ordering.
    command = ["ffmpeg", "-y", "-i", ctx.video_path]

    for audio_path in local_audio_files:
        command.extend(["-i", audio_path])
    if accompaniment_path:
        command.extend(["-i", accompaniment_path])

    for sub in subtitle_inputs:
        command.extend(["-i", sub["path"]])

    video_codec, video_map = "copy", "0:v"
    audio_map_commands, audio_dispositions, audio_metadata_commands = [], [], []
    filter_complex_parts = []

    # Audio inputs start at ffmpeg input index 1 (0 is the video).
    audio_input_offset = 1

    if ctx.remove_vocals and local_main_audio_path and accompaniment_path:
        # Merge the main dub with the accompaniment into one default track,
        # then map the remaining dub tracks unchanged.
        main_audio_index = local_audio_files.index(local_main_audio_path) + audio_input_offset
        accompaniment_index = len(local_audio_files) + audio_input_offset
        filter_complex_parts.append(f"[{main_audio_index}:a][{accompaniment_index}:a]amerge=inputs=2[merged_main_audio]")
        audio_map_commands.append(("-map", "[merged_main_audio]"))
        audio_dispositions.append("default")
        title = get_track_title(local_main_audio_path)
        lang_code = get_language_code_from_path(local_main_audio_path)
        audio_metadata_commands.append((f"-metadata:s:a:0", f"language={lang_code}"))
        audio_metadata_commands.append((f"-metadata:s:a:0", f"title={title}"))

        other_audio_stream_index = 1
        for i, audio_path in enumerate(local_audio_files):
            if audio_path != local_main_audio_path:
                audio_map_commands.append(("-map", f"{i + audio_input_offset}:a"))
                audio_dispositions.append("0")
                title = get_track_title(audio_path)
                lang_code = get_language_code_from_path(audio_path)
                audio_metadata_commands.append((f"-metadata:s:a:{other_audio_stream_index}", f"language={lang_code}"))
                audio_metadata_commands.append((f"-metadata:s:a:{other_audio_stream_index}", f"title={title}"))
                other_audio_stream_index += 1
    else:
        # No merging: map each dub track directly; the main one gets the
        # 'default' disposition.
        for i, audio_path in enumerate(local_audio_files):
            audio_map_commands.append(("-map", f"{i + audio_input_offset}:a"))
            is_main = (audio_path == local_main_audio_path)
            audio_dispositions.append("default" if is_main else "0")
            title = get_track_title(audio_path)
            lang_code = get_language_code_from_path(audio_path)
            audio_metadata_commands.append((f"-metadata:s:a:{i}", f"language={lang_code}"))
            audio_metadata_commands.append((f"-metadata:s:a:{i}", f"title={title}"))

    if ctx.keep_original_audio:
        # '0:a?' maps the source audio if present without failing otherwise.
        audio_map_commands.append(("-map", "0:a?"))
        audio_dispositions.append("0")
        original_audio_index = len(audio_map_commands) - 1
        audio_metadata_commands.append((f"-metadata:s:a:{original_audio_index}", "language=und"))
        audio_metadata_commands.append((f"-metadata:s:a:{original_audio_index}", "title=[Original Audio]"))

    if ctx.subtitle_mode == _("Hard Subtitles"):
        if not subtitle_inputs: raise ValueError(_("No subtitle file found or provided for hard subtitle mode."))
        # Burning in subtitles forces a re-encode, so pick a real encoder.
        video_codec = _get_best_video_codec(ctx.logger)
        main_srt_path = subtitle_inputs[0]['path']
        ass_path = os.path.join(ctx.work_dir, "subtitle.ass")
        convert_command = ["ffmpeg", "-i", main_srt_path, "-y", ass_path]
        ctx.execute_command(convert_command)
        # Escape for the ffmpeg filter string (forward slashes, quoted ':').
        escaped_ass_path = ass_path.replace('\\', '/').replace(':', '\\:')
        video_map = "[vout]"
        filter_complex_parts.insert(0, f"[0:v]ass='{escaped_ass_path}'[vout]")

    if filter_complex_parts:
        command.extend(["-filter_complex", ";".join(filter_complex_parts)])

    command.extend(["-map", video_map])
    for map_part in audio_map_commands:
        command.extend(map_part)

    for i, disp in enumerate(audio_dispositions):
        command.extend([f"-disposition:a:{i}", disp])

    for args in audio_metadata_commands:
        command.extend(args)

    if ctx.subtitle_mode == _("Soft Subtitles") and subtitle_inputs:
        # Subtitle inputs come after the video, all audio files and the
        # optional accompaniment in the ffmpeg input list.
        subtitle_input_offset = 1 + len(local_audio_files) + (1 if accompaniment_path else 0)
        for i, sub in enumerate(subtitle_inputs):
            stream_index = subtitle_input_offset + i

            # ROBUSTNESS: samefile() raises FileNotFoundError on a missing
            # path, so only compare when the generated SRT still exists.
            if (ctx.generated_srt_path and os.path.exists(ctx.generated_srt_path)
                    and os.path.samefile(sub['path'], ctx.generated_srt_path)):
                title = "[Generated SRT]"
            else:
                title = get_track_title(sub['path'])

            command.extend(["-map", f"{stream_index}:s?"])
            command.extend([f"-metadata:s:s:{i}", f"language={sub['lang']}"])
            command.extend([f"-metadata:s:s:{i}", f"title={title}"])

        # Mark the main subtitle track (or the first one) as default.
        main_sub_index = -1
        if ctx.main_subtitle:
            main_sub_path_local = os.path.join(ctx.work_dir, os.path.basename(ctx.main_subtitle))
            for i, sub in enumerate(subtitle_inputs):
                if sub['path'] == main_sub_path_local:
                    main_sub_index = i
                    break

        if main_sub_index == -1 and subtitle_inputs:
            main_sub_index = 0

        for i in range(len(subtitle_inputs)):
            command.extend([f"-disposition:s:{i}", "default" if i == main_sub_index else "0"])

        command.extend(["-c:s", "mov_text"])

    command.extend(["-c:v", video_codec])
    if video_codec != "copy":
        command.extend(["-preset", "fast", "-crf", "22"])

    if audio_map_commands:
        command.extend(["-c:a", "aac", "-b:a", "192k"])

    command.append(output_path)

    ctx.progress(_("--- Starting final video composition ---"))
    ctx.execute_command(command)

    return output_path

def _compose_by_stretching_video(ctx):
    """Compose the final video by stretching each video segment to match
    the duration of its dubbed audio clip.

    Pipeline: validate inputs, cut the source video into one segment per
    subtitle entry (cloning the last frame where the dub runs longer),
    rewrite the main SRT with the accumulated new timings, concatenate the
    segments, mux in the audio tracks, optionally embed soft subtitles,
    then rename the result and clean up temporaries.

    Returns the path of the final composed MP4 inside ctx.work_dir.
    Raises ValueError without a main subtitle, RuntimeError without
    ffprobe, FileNotFoundError without the dubbed clips directory, and
    InterruptedError when the user stops the task.
    """
    ctx.progress(_("--- Starting composition with video extension ---"))
    
    # 1. Validations
    if not ctx.main_subtitle or not os.path.exists(ctx.main_subtitle):
        raise ValueError(_("A main subtitle file must be specified for this mode."))

    try:
        subprocess.run(["ffprobe", "-version"], capture_output=True, check=True, creationflags=subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0)
    except (FileNotFoundError, subprocess.CalledProcessError):
        raise RuntimeError(_("ffprobe is required but not found. Please ensure ffmpeg is installed correctly."))

    # 2. Setup
    video_basename = os.path.splitext(os.path.basename(ctx.video_path))[0]
    target_lang = ctx.target_lang or 'composed'
    output_filename = f"{video_basename}.{target_lang}.stretched.mp4"
    output_path = os.path.join(ctx.work_dir, output_filename)
    
    segments_dir = os.path.join(ctx.work_dir, "_segments")
    os.makedirs(segments_dir, exist_ok=True)

    # 3. Find audio clips generated by the dubbing worker
    clips_dir = os.path.join(ctx.work_dir, f"clips.{target_lang}")
    if not os.path.isdir(clips_dir):
        # Fallback for older versions or different naming conventions from dubbing worker
        clips_dir = os.path.join(os.path.dirname(ctx.main_subtitle), f"clips.{target_lang}")
        if not os.path.isdir(clips_dir):
             raise FileNotFoundError(_("Audio clips directory not found in expected locations. Please ensure the dubbing step was completed successfully."))

    # 4. Prepare main SRT
    # Work on a copy inside the work directory; it is rewritten in step 5.
    main_srt_path = os.path.join(ctx.work_dir, os.path.basename(ctx.main_subtitle))
    if not os.path.exists(main_srt_path):
        shutil.copy(ctx.main_subtitle, main_srt_path)
    srt_segments = _parse_srt(main_srt_path)

    # 5. Create corresponding video segments and a new SRT file with adjusted timings
    ctx.progress(_("Generating video segments and new SRT based on audio clip durations..."))
    video_segment_paths = []
    new_srt_segments = []
    # NOTE(review): srt.timedelta is datetime.timedelta re-exported by the
    # srt module, not documented public API — consider importing timedelta
    # from datetime directly.
    current_time = srt.timedelta()
    video_codec = _get_best_video_codec(ctx.logger)

    for i, seg in enumerate(srt_segments):
        if ctx.is_stopped(): raise InterruptedError()

        # Assumes the dubbing worker names clips clip_0000.mp3, clip_0001.mp3,
        # ... in subtitle order — TODO confirm against the dubbing worker.
        audio_clip_path = os.path.join(clips_dir, f"clip_{i:04d}.mp3")
        video_segment_path = os.path.join(segments_dir, f"video_segment_{i:04d}.mp4")
        video_segment_paths.append(video_segment_path)

        if not os.path.exists(audio_clip_path):
            ctx.logger.warning(f"Audio clip {audio_clip_path} not found. Using SRT duration for segment {i}.")
            dub_duration_sec = seg.end.total_seconds() - seg.start.total_seconds()
        else:
            dub_duration_sec = _get_media_duration(audio_clip_path, ctx.logger)

        srt_duration_sec = seg.end.total_seconds() - seg.start.total_seconds()

        # _get_media_duration returns None on ffprobe failure.
        if dub_duration_sec is None:
            ctx.logger.warning(f"Could not get duration for {audio_clip_path}, using original SRT duration for segment {i}.")
            dub_duration_sec = srt_duration_sec
        
        # Update SRT timings: segments are laid out back-to-back, each
        # lasting exactly as long as its dubbed audio clip.
        dub_duration_td = srt.timedelta(seconds=dub_duration_sec)
        new_start_time = current_time
        new_end_time = new_start_time + dub_duration_td
        new_seg = srt.Subtitle(index=i + 1, start=new_start_time, end=new_end_time, content=seg.content)
        new_srt_segments.append(new_seg)
        current_time = new_end_time

        # Prepare video segment command
        extension = dub_duration_sec - srt_duration_sec
        cmd = ["ffmpeg", "-y", "-ss", str(seg.start.total_seconds()), "-t", str(srt_duration_sec), "-i", ctx.video_path]

        if extension > 0.02: # Only extend if difference is meaningful
            # tpad clones the last frame for 'extension' seconds so the
            # video segment matches the longer dub duration.
            ctx.log(f"Segment {i}: Dub is {extension:.2f}s longer. Extending video frame.")
            cmd.extend(["-vf", f"tpad=stop_mode=clone:stop_duration={extension}"])
        else:
            ctx.log(f"Segment {i}: Dub is not longer or shorter. Re-encoding video segment for compatibility.")
        
        # Every segment is re-encoded (even unextended ones) so the concat
        # demuxer gets uniformly encoded inputs; audio is dropped (-an).
        cmd.extend(["-c:v", video_codec, "-preset", "fast", "-crf", "22"])
        cmd.extend(["-an"])
        cmd.append(video_segment_path)
        ctx.execute_command(cmd)

    # Overwrite the main SRT file with the new, adjusted timings
    ctx.progress(_("Updating main subtitle file with new timings..."))
    with open(main_srt_path, 'w', encoding='utf-8') as f:
        f.write(srt.compose(new_srt_segments))
    ctx.log(f"Updated {os.path.basename(main_srt_path)} with timings synchronized with dubbed audio.")

    # 6. Concatenate video segments
    ctx.progress(_("Concatenating video segments..."))
    concat_list_path = os.path.join(segments_dir, "concat_list.txt")
    # NOTE(review): single quotes in file names would break this list, but
    # the names are generated above (video_segment_NNNN.mp4) so it is safe
    # in practice.
    with open(concat_list_path, 'w', encoding='utf-8') as f:
        for path in video_segment_paths:
            f.write(f"file '{os.path.basename(path)}'\n")

    final_video_path = os.path.join(ctx.work_dir, "final_silent_video.mp4")
    concat_cmd = [
        "ffmpeg", "-y",
        "-f", "concat",
        "-safe", "0",
        "-i", concat_list_path,
        "-c", "copy",
        final_video_path
    ]
    # cwd=segments_dir so the relative names in concat_list.txt resolve.
    ctx.execute_command(concat_cmd, cwd=segments_dir)

    # 7. Mux final video with all audio and subtitles
    ctx.progress(_("Muxing final video, audio, and subtitles..."))
    mux_cmd = ["ffmpeg", "-y", "-i", final_video_path]
    
    local_audio_files = []
    main_dub_audio_path = None
    for file_path in ctx.audio_files:
        if file_path and os.path.exists(file_path):
            local_path = os.path.join(ctx.work_dir, os.path.basename(file_path))
            if os.path.abspath(file_path) != os.path.abspath(local_path):
                shutil.copy(file_path, local_path)
            local_audio_files.append(local_path)
            mux_cmd.extend(["-i", local_path])
            if file_path == ctx.main_audio:
                main_dub_audio_path = local_path

    if ctx.keep_original_audio:
        # Stream-copy the original audio out of the source video so it can
        # be muxed back in as an extra track.
        original_audio_path = os.path.join(ctx.work_dir, "original_audio_mux.m4a")
        extract_cmd = ["ffmpeg", "-y", "-i", ctx.video_path, "-vn", "-c:a", "copy", original_audio_path]
        ctx.execute_command(extract_cmd)
        mux_cmd.extend(["-i", original_audio_path])

    # Input 0 is the silent concatenated video; audio inputs follow.
    mux_cmd.extend(["-map", "0:v:0"])
    audio_offset = 1
    for i in range(len(local_audio_files)):
        mux_cmd.extend(["-map", f"{i + audio_offset}:a:0"])
    if ctx.keep_original_audio:
        mux_cmd.extend(["-map", f"{len(local_audio_files) + audio_offset}:a:0"])

    mux_cmd.extend(["-c:v", "copy"])
    mux_cmd.extend(["-c:a", "copy"])

    # Per-track disposition and metadata; the main dub becomes the default.
    for i, audio_path in enumerate(local_audio_files):
        is_main = (audio_path == main_dub_audio_path)
        lang = get_language_code_from_path(audio_path)
        title = get_track_title(audio_path)
        mux_cmd.extend([f"-disposition:a:{i}", "default" if is_main else "0"])
        mux_cmd.extend([f"-metadata:s:a:{i}", f"language={lang}"])
        mux_cmd.extend([f"-metadata:s:a:{i}", f"title={title}"])
    
    if ctx.keep_original_audio:
        original_audio_index = len(local_audio_files)
        mux_cmd.extend([f"-disposition:a:{original_audio_index}", "0"])
        mux_cmd.extend([f"-metadata:s:a:{original_audio_index}", "language=und"])
        mux_cmd.extend([f"-metadata:s:a:{original_audio_index}", f"title=[Original Audio]"])

    mux_cmd.append(output_path)
    ctx.execute_command(mux_cmd)

    # 8. Final step: Add soft subtitles if needed
    stretched_video_path = output_path # Save path to ...stretched.mp4
    if ctx.subtitle_mode == _("Soft Subtitles") and ctx.subtitle_files:
        ctx.progress(_("Adding soft subtitles..."))
        final_output_path_with_subs = stretched_video_path.replace(".stretched.mp4", ".final.mp4")
        sub_cmd = ["ffmpeg", "-y", "-i", stretched_video_path]
        
        local_subtitle_files = []
        for file_path in ctx.subtitle_files:
            local_path = os.path.join(ctx.work_dir, os.path.basename(file_path))
            if not os.path.exists(local_path):
                shutil.copy(file_path, local_path)
            local_subtitle_files.append(local_path)
            sub_cmd.extend(["-i", local_path])

        # Map everything from the muxed video except its (nonexistent)
        # subtitle streams, then append each subtitle input.
        sub_cmd.extend(["-map", "0", "-map", "-0:s"])
        for i in range(len(local_subtitle_files)):
            sub_cmd.extend(["-map", f"{i+1}"])

        sub_cmd.extend(["-c", "copy"])
        sub_cmd.extend(["-c:s", "mov_text"])

        for i, sub_path in enumerate(local_subtitle_files):
            is_main = (sub_path == main_srt_path)
            lang = get_language_code_from_path(sub_path)
            title = get_track_title(sub_path)
            sub_cmd.extend([f"-disposition:s:{i}", "default" if is_main else "0"])
            sub_cmd.extend([f"-metadata:s:s:{i}", f"language={lang}"])
            sub_cmd.extend([f"-metadata:s:s:{i}", f"title={title}"])

        sub_cmd.append(final_output_path_with_subs)
        ctx.execute_command(sub_cmd)
        output_path = final_output_path_with_subs
    
    # 9. Final Rename & Cleanup
    ctx.progress(_("Finalizing and cleaning up temporary files..."))
    
    final_target_name = f"{video_basename}.{target_lang}.mp4"
    final_target_path = os.path.join(ctx.work_dir, final_target_name)
    
    # Rename the final output file
    if os.path.abspath(output_path) != os.path.abspath(final_target_path):
        ctx.log(f"Renaming '{os.path.basename(output_path)}' to '{os.path.basename(final_target_path)}'")
        if os.path.exists(final_target_path):
            os.remove(final_target_path)
        os.rename(output_path, final_target_path)

    # Cleanup temporary files
    temp_files_to_remove = [
        final_video_path, # .../final_silent_video.mp4
    ]
    # Add the stretched video to cleanup list if it's not the final output
    if os.path.abspath(stretched_video_path) != os.path.abspath(final_target_path):
        temp_files_to_remove.append(stretched_video_path)

    if ctx.keep_original_audio:
        original_audio_path = os.path.join(ctx.work_dir, "original_audio_mux.m4a")
        temp_files_to_remove.append(original_audio_path)

    for f_path in temp_files_to_remove:
        if f_path and os.path.exists(f_path):
            try:
                ctx.log(f"Removing temporary file: {os.path.basename(f_path)}")
                os.remove(f_path)
            except OSError as e:
                ctx.logger.warning(f"Could not remove temporary file {f_path}: {e}")

    shutil.rmtree(segments_dir, ignore_errors=True)
    
    return final_target_path

class VideoCompositionWorker(BaseWorker):
    """Background worker that drives the video composition pipeline and
    reports progress/results back to the UI via Qt signals."""

    # Emitted on success with (status message, output file path).
    composition_finished = Signal(str, str)

    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs
        self.ctx = None

    def stop(self):
        """Request interruption and kill any subprocess the context spawned."""
        super().stop()
        ctx = self.ctx
        if ctx and ctx.process_manager:
            ctx.process_manager.stop()

    def core_run(self):
        """Build the composition context, run the pipeline, report the outcome."""
        try:
            self.progress.emit(_("Starting video composition..."))
            self.ctx = CompositionContext(
                **self.kwargs,
                progress_callback=self.progress.emit,
                is_stopped_func=self.isInterruptionRequested
            )
            result_path = compose_video_core(self.ctx)
            if self.isInterruptionRequested():
                self.progress.emit(_("Video composition task cancelled."))
                return
            self.composition_finished.emit(_("Video composition successful!"), result_path)
        except InterruptedError as e:
            self.progress.emit(_("Task cancelled: {}").format(e))
        except Exception as e:
            # Only surface errors the user did not cause by cancelling.
            if not self.isInterruptionRequested():
                self.error.emit(_("Video composition failed: {}").format(str(e)))
                if self.ctx:
                    self.ctx.logger.exception("Video composition failed")
        finally:
            # Keep work directory and log file as per user request
            if self.ctx and self.ctx.work_dir:
                 self.progress.emit(_("Processing complete. Logs and files are saved in: {}").format(self.ctx.work_dir))