import os
import time
import logging
import concurrent.futures
from src.ui.i18n import _
from PySide6.QtCore import Signal, QTimer
from src.ui.base_worker import BaseWorker
from src.utils.formatter import format_seconds

# Import core logic functions
from src.processing.worker import generate_srt_core
from src.translation.translation_worker import translate_srt_core
from src.processing.dubbing_worker import dub_srt_core
from src.processing.video_composition_worker import compose_video_core, CompositionContext
from src.config import app_config
from src.utils.logger import get_logger, create_task_logger
from src.utils.process import ProcessManager


class BatchCompositionWorker(BaseWorker):
    """
    Executes batch video composition tasks in the background.

    SRT generation (Whisper) runs sequentially on the worker thread; the
    remaining per-video steps (translate, dub, compose) are pipelined
    through a ThreadPoolExecutor with a configurable degree of parallelism.
    """

    class LoggerAdapter:
        """Minimal logger-like facade (info/debug) that forwards to the
        worker's _log helper, pre-bound to a single video."""

        def __init__(self, log_func, video_path, video_name):
            self._log = log_func
            self._video_path = video_path
            self._video_name = video_name

        def info(self, msg):
            self._log(msg, self._video_path, self._video_name, level="INFO")

        def debug(self, msg):
            self._log(msg, self._video_path, self._video_name, level="DEBUG")

    # Inherits progress, error, time_updated, finished(bool, float) from BaseWorker.
    current_file_progress = Signal(int, int, str)  # current file index, total files, filename
    video_status_changed = Signal(str, str, str)   # video_path, status_text, status_key
    video_time_updated = Signal(str, float)        # video_path, elapsed_seconds
    # Unified log signal: video_path, message, level ("INFO" or "DEBUG").
    # NOTE(review): this intentionally redefines any log_message on BaseWorker
    # with a three-argument signature — confirm against BaseWorker.
    log_message = Signal(str, str, str)

    def __init__(self, video_paths, tasks, options):
        """
        :param video_paths: list of input video file paths to process.
        :param tasks: dict of task flags ("generate_srt", "translate_srt",
                      "dub_video", "compose_video").
        :param options: dict of processing options (languages, models,
                        retry policy, parallelism, skip_existing, ...).
        """
        super().__init__()
        self.video_paths = video_paths
        self.tasks = tasks
        self.options = options
        self.logger = get_logger(__name__)

        self.max_parallel_tasks = self.options.get("max_parallel_tasks", 1)
        self.executor = None
        self.process_managers = {}  # video_path -> ProcessManager

    def update_max_workers(self, new_limit):
        """
        Safely updates the max number of workers in the thread pool executor.
        This method is thread-safe and can be called from the main UI thread.
        """
        self.max_parallel_tasks = new_limit
        if self.executor:
            # Note: This uses an internal attribute of ThreadPoolExecutor.
            # This is generally not recommended, but it's the most direct way
            # to change the pool size dynamically for this specific use case.
            # A lock isn't strictly necessary here as the change is atomic.
            # NOTE(review): shrinking the limit does not interrupt threads
            # that are already running.
            self.executor._max_workers = new_limit

    def _log(self, message, video_path=None, video_name=None, level="INFO"):
        """
        Helper for logging. Emits a unified log signal for the UI and also
        logs to the standard Python logger (for console/file output).
        - INFO: For general progress, shown in main log and stored in task log.
        - DEBUG: For detailed progress, stored in task log only.
        """
        # The video_name is for display purposes in the main log.
        # The video_path is the key for storing logs.
        display_message = f"[{video_name}] {message}" if video_name else message

        # 1. Log to standard logger (for console and file).
        log_level = logging.DEBUG if level.upper() == "DEBUG" else logging.INFO
        self.logger.log(log_level, display_message)

        # 2. Always emit the unified signal for the UI.
        if video_path:
            self.log_message.emit(video_path, message, level)
        else:
            # For global messages, use a special path key and send to main log.
            self.log_message.emit("global", display_message, "INFO")

    def core_run(self):
        """Main entry point: generate SRTs sequentially, then pipeline the
        remaining per-video tasks through the thread pool and wait for them."""
        total_files = len(self.video_paths)
        self._log(_("Starting batch processing for a total of {} files.").format(total_files))
        self._log(_("Whisper tasks will run sequentially, subsequent tasks will be pipelined with up to {} parallel tasks.").format(self.max_parallel_tasks))

        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_parallel_tasks) as self.executor:
            if self.isInterruptionRequested(): return

            future_to_video = {}

            # --- Generate SRTs sequentially and pipeline follow-up tasks ---
            for i, video_path in enumerate(self.video_paths):
                if self.isInterruptionRequested(): break

                video_name = os.path.splitext(os.path.basename(video_path))[0]
                self.current_file_progress.emit(i + 1, total_files, video_name)
                self.video_status_changed.emit(video_path, _("Queued..."), "QUEUED")
                self._log(_("--- Queued video for processing: {} ---").format(video_path), video_path, video_name)

                work_dir = os.path.join(app_config.get('paths.workspace', 'trans'), video_name)
                os.makedirs(work_dir, exist_ok=True)

                final_video_path = os.path.join(work_dir, f"{video_name}.{self.options.get('target_lang', 'zh-CN')}.mp4")
                if self.options.get("skip_existing") and os.path.exists(final_video_path):
                    self._log(_("Final video already exists, skipping processing: {}").format(final_video_path), video_path, video_name)
                    self.video_status_changed.emit(video_path, _("Skipped"), "SKIPPED")
                    self.video_time_updated.emit(video_path, 0.0)
                    continue

                # --- Step 1: Generate SRT (sequential) ---
                original_srt_path = os.path.join(work_dir, f"{video_name}.srt")
                srt_generated = False
                if self.tasks.get("generate_srt"):
                    if self.options.get("skip_existing") and os.path.exists(original_srt_path):
                        self._log(_("Step 1: Found original subtitle file, skipping generation: {}").format(original_srt_path), video_path, video_name)
                        srt_generated = True
                    else:
                        self._log(_("Step 1: Starting to generate original subtitles (Whisper)..."), video_path, video_name)
                        try:
                            # Pass the correct target path for the SRT file.
                            self._run_generation(video_path, work_dir, original_srt_path)
                            if os.path.exists(original_srt_path):
                                self._log(_("Step 1: Successfully generated original subtitles: {}").format(original_srt_path), video_path, video_name)
                                srt_generated = True
                            else:
                                self._log(_("Step 1: Whisper task completed, but the expected SRT file was not generated."), video_path, video_name)
                        except Exception as e:
                            self._log(_("Step 1: Error during subtitle generation: {}").format(e), video_path, video_name)
                elif os.path.exists(original_srt_path):
                    self._log(_("Step 1: Skipping generation, using existing SRT file: {}").format(original_srt_path), video_path, video_name)
                    srt_generated = True

                # --- If the SRT is ready, submit follow-up tasks to the pool ---
                if not self.isInterruptionRequested() and srt_generated:
                    self._log(_("SRT is ready, submitting subsequent tasks for parallel processing..."), video_path, video_name)
                    future = self.executor.submit(self._process_video_task, video_path)
                    future_to_video[future] = video_path
                elif not srt_generated:
                    self._log(_("Video '{}' will not be processed further due to missing SRT file.").format(video_name), video_path, video_name)
                    self.video_status_changed.emit(video_path, _("Failed (No SRT)"), "FAILED")

            if self.isInterruptionRequested():
                self._log(_("Task was interrupted by the user."))
                return

            # --- Wait for all submitted parallel tasks to complete ---
            self._log(_("\n--- All tasks submitted, waiting for parallel processing to complete ---"))
            for future in concurrent.futures.as_completed(future_to_video):
                video_path = future_to_video[future]
                video_name = os.path.splitext(os.path.basename(video_path))[0]
                try:
                    result = future.result()
                    if result:
                        self._log(_("Video '{}' processed successfully: {}").format(video_name, result), video_path, video_name)
                    else:
                        self._log(_("Video '{}' processing finished but returned no result path.").format(video_name), video_path, video_name)
                except concurrent.futures.CancelledError:
                    self._log(_("Task for video '{}' was cancelled.").format(video_name), video_path, video_name)
                    self.video_status_changed.emit(video_path, _("Cancelled"), "CANCELLED")
                except Exception as exc:
                    # The 'finally' block in _process_video_task has already
                    # emitted the final 'Failed' status for this video.
                    self._log(_("An error occurred while processing video '{}': {}").format(video_name, exc), video_path, video_name)
                    # Don't emit a global error for every single video failure in a batch.

        if not self.isInterruptionRequested():
            self.progress.emit(_("\n🎉 All batch tasks have been processed!"))
        else:
            self.progress.emit(_("\nBatch task was stopped by the user."))

    def _process_video_task(self, video_path):
        """Handles all tasks for a single video, from translation to
        composition, with retries.

        Returns the final output path on full success, otherwise None.
        Re-raises the last exception when every retry attempt fails (it is
        then logged by the future handler in core_run). The final status and
        elapsed time are always emitted via a try/finally, regardless of how
        this method exits.
        """
        if self.isInterruptionRequested(): return None

        start_time = time.time()
        # 'status' holds the machine-readable status *key*; it is mapped to a
        # translated display string in the finally block.
        status = "FAILED"
        video_name = os.path.splitext(os.path.basename(video_path))[0]
        work_dir = os.path.join(app_config.get('paths.workspace', 'trans'), video_name)

        # --- Create a dedicated logger for this specific video task ---
        task_log_path = os.path.join(work_dir, f"{video_name}.log")
        task_logger = create_task_logger(task_log_path)
        task_logger.info(f"--- Starting processing for video: {video_name} ---")

        auto_retry = self.options.get("auto_retry", False)
        max_retries = self.options.get("retry_count", 3) if auto_retry else 0

        # NOTE(review): this QTimer is created in a thread-pool thread; without
        # a Qt event loop running in this thread the periodic timeout may never
        # fire — confirm. The final elapsed time is still emitted in 'finally'.
        timer = QTimer()
        timer.timeout.connect(lambda: self.video_time_updated.emit(video_path, time.time() - start_time))
        timer.start(1000)

        try:
            for attempt in range(max_retries + 1):
                if self.isInterruptionRequested():
                    status = "CANCELLED"
                    task_logger.warning("Task was cancelled by user.")
                    break

                try:
                    # Set initial status for the attempt.
                    self.video_status_changed.emit(video_path, _("Processing..."), "PROCESSING")
                    task_logger.info(f"--- Starting attempt {attempt + 1}/{max_retries + 1} ---")

                    target_lang = self.options.get("target_lang", "zh-CN")

                    original_srt_path = os.path.join(work_dir, f"{video_name}.srt")
                    translated_srt_path = os.path.join(work_dir, f"{video_name}.{target_lang}.srt")
                    dub_audio_path = None

                    # 2. Translate subtitles
                    if not self.isInterruptionRequested() and self.tasks.get("translate_srt"):
                        self.video_status_changed.emit(video_path, _("Translating..."), "TRANSLATING")
                        if self.options.get("skip_existing") and os.path.exists(translated_srt_path):
                            self._log(_("Step 2: Found translated subtitles, skipping translation."), video_path, video_name)
                            task_logger.info("Found existing translated subtitles, skipping translation.")
                        else:
                            if not os.path.exists(original_srt_path):
                                raise RuntimeError(_("Cannot translate subtitles because the original SRT file does not exist."))
                            self._log(_("Step 2: Starting subtitle translation..."), video_path, video_name)
                            self._run_translation(original_srt_path, video_path, video_name, task_logger)

                    # 3. Generate Dubbing
                    if not self.isInterruptionRequested() and self.tasks.get("dub_video"):
                        self.video_status_changed.emit(video_path, _("Dubbing..."), "DUBBING")
                        prospective_dub_path = os.path.join(work_dir, f"{video_name}.{target_lang}.m4a")
                        if self.options.get("skip_existing") and os.path.exists(prospective_dub_path):
                            self._log(_("Step 3: Found dubbing file, skipping generation."), video_path, video_name)
                            task_logger.info("Found existing dubbing audio file, skipping generation.")
                            dub_audio_path = prospective_dub_path
                        else:
                            srt_for_dubbing = translated_srt_path if os.path.exists(translated_srt_path) else original_srt_path
                            if not os.path.exists(srt_for_dubbing):
                                raise RuntimeError(_("Cannot generate dubbing because no suitable SRT file was found."))
                            self._log(_("Step 3: Starting to generate dubbing..."), video_path, video_name)
                            dub_audio_path = self._run_dubbing(srt_for_dubbing, work_dir, video_path, video_name, target_lang, task_logger)

                    # 4. Compose Video
                    if not self.isInterruptionRequested() and self.tasks.get("compose_video"):
                        self.video_status_changed.emit(video_path, _("Composing..."), "COMPOSING")
                        self._log(_("Step 4: Starting final video composition..."), video_path, video_name)
                        result = self._run_composition(video_path, dub_audio_path, translated_srt_path, original_srt_path, video_name, target_lang, task_logger)
                        status = "COMPLETED"
                        task_logger.info(f"--- Task completed successfully. Output: {result} ---")
                        return result  # Success; final status is emitted in 'finally'.

                    status = "COMPLETED_PARTIAL"
                    task_logger.info("--- Task finished with only partial steps completed. ---")
                    return None  # Partial success, exit loop.

                except Exception as e:
                    task_logger.error(f"An error occurred during attempt {attempt + 1}: {e}", exc_info=True)
                    if self.isInterruptionRequested():
                        status = "CANCELLED"
                        task_logger.warning("Task cancelled during exception handling.")
                        break  # Exit loop if cancelled during an exception.

                    if attempt < max_retries:
                        self.video_status_changed.emit(video_path, _("Retrying..."), "RETRYING")
                        self._log(_("Task failed, retrying in a moment... (Attempt {current}/{total})").format(current=attempt + 1, total=max_retries), video_path, video_name)
                        self._log(f"  {_('Error')}: {e}", video_path, video_name)
                        time.sleep(2)  # Wait a moment before retrying.
                        continue  # Go to the next attempt.
                    else:
                        status = "FAILED"
                        task_logger.error("Task failed after all retries.", exc_info=True)
                        # Last attempt: re-raise so the future handler in
                        # core_run can log the failure.
                        raise
        finally:
            # Always runs: on success return, cancellation break, or the
            # re-raised exception above. Stops the timer and reports the
            # final status and elapsed time exactly once.
            timer.stop()

            status_map = {
                "COMPLETED": _("Completed"),
                "COMPLETED_PARTIAL": _("Completed (Partial)"),
                "CANCELLED": _("Cancelled"),
                "FAILED": _("Failed"),
            }

            if self.isInterruptionRequested():
                status_key = "CANCELLED"
            else:
                status_key = status
            status_text = status_map.get(status_key, _("Failed"))

            duration = time.time() - start_time
            self.video_status_changed.emit(video_path, status_text, status_key)
            self.video_time_updated.emit(video_path, duration)

        return None  # Reached only when the loop exits via 'break'.

    def _run_generation(self, video_path, work_dir, output_srt_path):
        """Runs the Whisper SRT-generation step for one video and registers
        its ProcessManager so stop() can kill the underlying process."""
        self.video_status_changed.emit(video_path, _("Generating SRT..."), "GENERATING_SRT")
        video_name = os.path.splitext(os.path.basename(video_path))[0]
        process_manager = ProcessManager()
        self.process_managers[video_path] = process_manager

        model_data = self.options.get("whisper_model")
        model_type = model_data.get("type")
        model_name = model_data.get("name")

        if model_type == 'local':
            model_path_or_name = os.path.join(
                app_config.get('paths.models', 'models'), model_name)
            if not os.path.exists(model_path_or_name):
                raise FileNotFoundError(
                    f"Local model file not found: {model_path_or_name}")
        else:  # online
            model_path_or_name = model_name

        return generate_srt_core(
            process_manager=process_manager,
            media_path=video_path, model_path=model_path_or_name, language=self.options.get("video_lang"),
            translate=self.options.get("translate_to_english"), work_dir=work_dir,
            progress_callback=lambda msg: self._log(msg, video_path, video_name, level="INFO"),
            is_stopped_func=self.isInterruptionRequested,
            output_srt_path=output_srt_path
        )

    def _run_translation(self, srt_path, video_path, video_name, task_logger):
        """Runs subtitle translation for one video via translate_srt_core."""
        translator_name = self.options.get("translator")
        model_config = self.options.get("model_config")

        if translator_name == "OpenAI" and not model_config:
            raise ValueError(_("OpenAI translator was selected, but no model configuration was provided."))

        return translate_srt_core(
            worker=self,
            srt_path=srt_path,
            translator_name=translator_name,
            source_lang=self.options.get("source_lang"),
            target_lang=self.options.get("target_lang"),
            model_config=model_config,
            video_type=self.options.get("video_type", "常规"),
            translation_style=self.options.get("translation_style", "常规"),
            no_think=self.options.get("no_think", False),
            progress_callback=lambda msg: self._log(msg, video_path, video_name, level="INFO"),
            logger=task_logger,  # Pass the dedicated task logger.
            is_stopped_func=self.isInterruptionRequested
        )

    def _run_dubbing(self, srt_path, work_dir, video_path, video_name, target_lang, task_logger):
        """Runs TTS dubbing for one video; returns the generated audio path."""
        clips_dir = os.path.join(work_dir, f"clips.{target_lang}")
        # Create a context object for dubbing.
        from src.processing.dubbing_worker import DubbingContext

        # Create and register a process manager for this specific task
        # (keyed separately so it does not clobber the Whisper manager).
        dubbing_process_manager = ProcessManager()
        self.process_managers[f"{video_path}_dubbing"] = dubbing_process_manager

        ctx = DubbingContext(
            srt_path=srt_path,
            tts_service_name=self.options.get("tts_service"),
            voice=self.options.get("voice"),
            locale=target_lang,
            work_dir=work_dir,
            clips_dir=clips_dir,
            preserve_audio_duration=self.options.get("preserve_audio_duration"),
            is_stopped_func=self.isInterruptionRequested,
            logger=task_logger,  # Pass the dedicated task logger.
            process_manager=dubbing_process_manager,
            concurrent_tasks=self.max_parallel_tasks,
        )
        return dub_srt_core(ctx)

    def _run_composition(self, video_path, dub_path, translated_srt_path, original_srt_path, video_name, target_lang, task_logger):
        """Composes the final video from the source video, optional dubbing
        audio, and the available subtitle tracks; returns the output path."""
        actual_dub_path = dub_path if dub_path and os.path.exists(dub_path) else None

        audio_files = [actual_dub_path] if actual_dub_path else []

        subtitle_files = []
        # The original SRT is only added if the user wants to embed it.
        # Its title will be handled in compose_video_core.
        if self.options.get("embed_whisper_srt") and os.path.exists(original_srt_path):
            subtitle_files.append(original_srt_path)

        if os.path.exists(translated_srt_path) and translated_srt_path != original_srt_path:
            subtitle_files.append(translated_srt_path)

        main_srt = translated_srt_path if os.path.exists(translated_srt_path) else original_srt_path

        # DEBUG: Log the value of the extend_video_for_dub option.
        extend_video_flag = self.options.get("extend_video_for_dub")
        task_logger.info(f"[DEBUG] extend_video_for_dub option from Batch Worker: {extend_video_flag}")

        ctx = CompositionContext(
            video_path=video_path,
            audio_files=audio_files,
            subtitle_files=subtitle_files,
            # Pass the original SRT path separately so the composition logic can identify it
            # and assign the correct title, e.g., "[Generated SRT]".
            generated_srt_path=original_srt_path if self.options.get("embed_whisper_srt") else None,
            main_audio=actual_dub_path,
            main_subtitle=main_srt,
            keep_original_audio=self.options.get("keep_original_audio"),
            remove_vocals=self.options.get("remove_vocals"),
            subtitle_mode=self.options.get("subtitle_mode"),
            extend_video_for_dub=self.options.get("extend_video_for_dub"),
            target_lang=target_lang,
            progress_callback=lambda msg: self._log(msg, video_path, video_name, level="INFO"),
            is_stopped_func=self.isInterruptionRequested,
            logger=task_logger  # Pass the dedicated task logger.
        )
        # Store the process manager for this composition task.
        self.process_managers[video_path] = ctx.process_manager
        return compose_video_core(ctx)

    def stop(self):
        """Requests interruption of the batch: shuts down the pool (cancelling
        queued futures) and stops all registered external processes."""
        super().stop()  # Call base class stop.
        self._log(_("Stop signal received, interrupting all tasks..."))

        if self.executor:
            # This does not cancel running futures, it just prevents new ones.
            self.executor.shutdown(wait=False, cancel_futures=True)

        # Stop all managed ffmpeg/whisper processes. Iterate a snapshot: pool
        # threads may still be inserting managers while we shut down, and
        # mutating the dict during iteration would raise RuntimeError.
        for video_path, manager in list(self.process_managers.items()):
            video_name = os.path.splitext(os.path.basename(video_path))[0]
            self._log(_("Stopping processes related to video '{}'...").format(video_name), video_path, "Manager")
            try:
                manager.stop()
            except Exception as e:
                self._log(_("Error while stopping processes: {}").format(e), video_path, "Manager")

        self.progress.emit(_("Attempted to interrupt all tasks."))