from src.ui.base_worker import BaseWorker
import os
import time
import multiprocessing
from PySide6.QtCore import QThread, Signal
from src.config import app_config
from src.utils.process import ProcessManager
from src.utils.logger import get_logger
from src.ui.i18n import _



def generate_srt_core(process_manager, media_path, model_path, language, translate, work_dir, output_srt_path=None, progress_callback=None, timing_callback=None, is_stopped_func=None):
    """
    Core logic for subtitle generation: extract mono 16 kHz audio with ffmpeg,
    then transcribe it to an SRT file with whisper-cli.

    Args:
        process_manager: Object whose ``run(command, log_callback=...)`` executes
            an external command (the project's ProcessManager).
        media_path (str): Path to the input audio/video file.
        model_path (str): Path to the whisper model file (``-m``).
        language (str): Language code passed to whisper (``-l``).
        translate (bool): If True, ask whisper to translate to English (``-tr``).
        work_dir (str): Directory where the intermediate WAV file is written.
        output_srt_path (str, optional): The explicit path to save the final SRT file.
                                         If None, it defaults to the same directory as the media file.
        progress_callback (callable, optional): Receives human-readable status strings.
        timing_callback (callable, optional): Receives ``(stage_name, elapsed_seconds)``.
        is_stopped_func (callable, optional): Returns True when the caller requested
            cancellation; polled between stages.

    Returns:
        tuple: ``(final_srt_path, total_elapsed_seconds)`` on success, or
        ``(None, 0)`` if processing was stopped.
    """
    total_start_time = time.time()
    logger = get_logger(__name__)
    # Default to "never stopped" so callers without cancellation support work too.
    is_stopped = is_stopped_func or (lambda: False)

    def record_time(stage, elapsed_time):
        # Log each stage's duration and forward it to the optional callback.
        logger.debug(_("⏱️  [Timing] {stage} took: {time:.2f} seconds").format(
            stage=stage, time=elapsed_time))
        if timing_callback:
            timing_callback(stage, elapsed_time)

    # 1. Extract audio as mono 16 kHz WAV (the input format whisper expects).
    if is_stopped():
        return None, 0

    media_name = os.path.splitext(os.path.basename(media_path))[0]
    wav_path = os.path.join(work_dir, f"{media_name}.wav")
    if progress_callback:
        progress_callback(_("Extracting audio..."))
    stage_start_time = time.time()

    extract_command = [
        "ffmpeg", "-i", media_path,
        "-map", "0:a", "-ac", "1", "-ar", "16000",
        "-af", "aresample=resampler=soxr",  # soxr: higher-quality resampling
        "-y", wav_path
    ]
    process_manager.run(extract_command, log_callback=logger.debug)
    if is_stopped():
        return None, 0
    record_time(_("Extract Audio"), time.time() - stage_start_time)

    # 2. Run Whisper transcription on the extracted audio.
    if progress_callback:
        progress_callback(_("Performing speech-to-text..."))
    stage_start_time = time.time()

    # Determine the output path for the SRT file.
    # If an explicit path is given, use it. Otherwise, default to the media file's location.
    final_srt_path = output_srt_path or f"{os.path.splitext(media_path)[0]}.srt"
    # whisper-cli appends the ".srt" extension itself, so pass the base path.
    output_srt_base = os.path.splitext(final_srt_path)[0]

    cpu_cores = multiprocessing.cpu_count()
    whisper_command = [
        "whisper-cli",
        "-m", model_path,
        "-f", wav_path,
        "-l", language,
        "-t", str(cpu_cores),
        "--output-srt",
        "--output-file", output_srt_base,
        "--vad",
        # NOTE(review): relative VAD model path — assumes the process CWD is the
        # project root; confirm against the launcher.
        "--vad-model", "models/ggml-silero-v5.1.2.bin",
        "--vad-min-silence-duration-ms", "75",
        "--vad-max-speech-duration-s", "15",
        "--split-on-word",
    ]

    if translate:
        whisper_command.append("-tr")

    process_manager.run(whisper_command, log_callback=logger.debug)
    if is_stopped():
        return None, 0
    record_time(_("Whisper Transcription"), time.time() - stage_start_time)

    if progress_callback:
        progress_callback(
            _("Successfully generated subtitle file: {}").format(final_srt_path))

    total_elapsed_time = time.time() - total_start_time
    record_time(_("Total Time"), total_elapsed_time)

    return final_srt_path, total_elapsed_time


class ProcessingWorker(BaseWorker):
    """
    Background worker that runs the ffmpeg + whisper pipeline off the UI thread.

    Thin QThread wrapper around :func:`generate_srt_core`; all heavy lifting is
    delegated to the core function while this class wires up Qt signals.
    """
    # Inherits progress, error, time_updated, finished(bool, float)
    timing_updated = Signal(str, float)  # stage_name, elapsed_time
    task_finished = Signal(str, float)  # success_message, total_time

    def __init__(self, media_path, model_path, language, translate):
        super().__init__()
        self.media_path = media_path
        self.model_path = model_path
        self.language = language
        self.translate = translate
        # Owns its subprocess manager so stop() can terminate child processes.
        self.process_manager = ProcessManager()

    def core_run(self):
        # Standalone generation gets a dedicated per-media temp directory
        # under the configured workspace root.
        base_name = os.path.splitext(os.path.basename(self.media_path))[0]
        temp_dir = os.path.join(
            app_config.get('paths.workspace', 'trans'),
            'generated_audio',
            base_name,
        )
        os.makedirs(temp_dir, exist_ok=True)

        srt_path, elapsed = generate_srt_core(
            self.process_manager,
            self.media_path,
            self.model_path,
            self.language,
            self.translate,
            temp_dir,
            progress_callback=self.progress.emit,
            timing_callback=self.timing_updated.emit,
            is_stopped_func=self.isInterruptionRequested
        )

        # Guard clause: a user-requested stop is not an error.
        if self.isInterruptionRequested():
            self.progress.emit(
                _("Processing was manually stopped by the user."))
            return

        if srt_path:
            message = _("Successfully generated subtitle file: {}").format(srt_path)
            self.task_finished.emit(message, elapsed)
        elif not self.isInterruptionRequested():
            # No path and no interruption means the core failed silently.
            raise Exception(
                _("SRT generation core logic did not return an output path but reported no error."))

    def stop(self):
        # Let the base class flag the interruption, then terminate any
        # external process still running.
        super().stop()
        self.process_manager.stop()
