

import os
import subprocess
import whisper
import configparser
from pathlib import Path
from logger_setup import log

# Resolve locations relative to this file (not the CWD) so the pipeline
# behaves the same no matter where it is launched from.
SCRIPT_DIR = Path(__file__).resolve().parent
# config.ini is expected to sit next to this script; VideoProcessor reads it.
CONFIG_PATH = SCRIPT_DIR / 'config.ini'

class VideoProcessor:
    """Handles the video-to-text pipeline."""
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read(CONFIG_PATH)
        log.info("VideoProcessor initialized.")
        
        self.video_source_path = Path(self.config.get('Paths', 'video_source').strip())
        self.transcripts_output_path = Path(self.config.get('Paths', 'transcripts_output').strip())
        self.local_model_path = Path(self.config.get('Paths', 'local_model_path').strip())
        
        self.model_name = self.config.get('Whisper', 'model_name').strip()
        self.model_filename = self.config.get('Whisper', 'model_filename').strip()
        self.model_file = self.local_model_path / self.model_filename

        self.supported_formats = ['.mp4', '.mkv', '.mov', '.avi']
        self.model = None

    @staticmethod
    def _format_timestamp(seconds: float) -> str:
        assert seconds >= 0, "non-negative timestamp expected"
        milliseconds = round(seconds * 1000.0)
        hours, milliseconds = divmod(milliseconds, 3_600_000)
        minutes, milliseconds = divmod(milliseconds, 60_000)
        seconds, milliseconds = divmod(milliseconds, 1000)
        return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"

    def _load_model(self):
        if self.model:
            return True
        log.info("Attempting to load Whisper model.")
        self.local_model_path.mkdir(exist_ok=True)
        model_path = self.local_model_path / f"{self.model_name}.pt"
        if model_path.exists():
            log.info(f"Found local model file: {model_path}")
            self.model = whisper.load_model(str(model_path))
        else:
            log.warning(f"Local model not found. Attempting to download '{self.model_name}'.")
            self.model = whisper.load_model(self.model_name, download_root=self.local_model_path)
        log.info("Whisper model loaded successfully.")
        return True

    def process_all_videos(self) -> list[Path]:
        """
        Finds all videos and processes them, returning a list of transcript paths.
        """
        log.info("--- Starting Video Processing Stage ---")
        if not self._load_model():
            log.critical("Could not load Whisper model. Aborting.")
            return []

        self.transcripts_output_path.mkdir(exist_ok=True)
        video_files = [f for f in self.video_source_path.iterdir() if f.is_file() and f.suffix.lower() in self.supported_formats]

        if not video_files:
            log.warning(f"No supported video files found in {self.video_source_path}.")
            return []

        log.info(f"Found {len(video_files)} video(s) to process: {[f.name for f in video_files]}")
        
        processed_transcripts = []
        for video_path in video_files:
            transcript_path = self._process_single_video(video_path)
            if transcript_path:
                processed_transcripts.append(transcript_path)
        
        log.info("--- Video Processing Stage Complete ---")
        return processed_transcripts

    def _process_single_video(self, video_path: Path) -> Path | None:
        video_name = video_path.stem
        log.info(f"Processing video: {video_path.name}")
        video_output_dir = self.transcripts_output_path / video_name
        video_output_dir.mkdir(exist_ok=True)
        
        temp_audio_path = video_output_dir / f"{video_name}.mp3"
        transcript_path = video_output_dir / "transcript.txt"

        if not self._extract_audio(video_path, temp_audio_path):
            return None

        if not self._transcribe_audio(temp_audio_path, transcript_path):
            return None
        
        self._cleanup(temp_audio_path)
        return transcript_path

    def _extract_audio(self, video_path, audio_path):
        log.info(f"Step 1: Extracting audio to {audio_path}")
        try:
            command = ["ffmpeg", "-i", str(video_path), "-vn", "-ab", "192k", "-ar", "44100", "-f", "mp3", "-y", str(audio_path)]
            subprocess.run(command, check=True, capture_output=True, text=True, encoding='utf-8', errors='ignore')
            return True
        except Exception as e:
            log.error(f"ffmpeg error during audio extraction: {e}", exc_info=True)
            return False

    def _transcribe_audio(self, audio_path, transcript_path):
        log.info("Step 2: Transcribing audio with Whisper...")
        try:
            result = self.model.transcribe(str(audio_path), verbose=False, language='Chinese', task='transcribe')
            with open(transcript_path, "w", encoding="utf-8") as f:
                for segment in result["segments"]:
                    start = self._format_timestamp(segment['start'])
                    end = self._format_timestamp(segment['end'])
                    f.write(f"[{start} --> {end}] {segment['text'].strip()}\n")
            log.info(f"Transcription successful! Saved to: {transcript_path}")
            return True
        except Exception as e:
            log.error(f"Transcription error: {e}", exc_info=True)
            return False

    def _cleanup(self, file_to_delete):
        log.info(f"Step 3: Cleaning up temporary file: {file_to_delete}")
        try:
            os.remove(file_to_delete)
        except OSError as e:
            log.error(f"Error deleting temp file {file_to_delete}: {e}")

