"""

OpenAI Whisper
"""

import os
import logging
import json
from pathlib import Path
from typing import Optional, Dict, List, Any, Callable
import whisper
from tqdm import tqdm
import torch

class WhisperTranscriber:
    """Wrapper around OpenAI Whisper for transcribing audio files.

    Supports transcribing a single audio file, transcribing a list of
    pre-split audio chunks, and merging chunked results back into a
    single transcript with adjusted segment timestamps.
    """

    # Model sizes accepted by whisper.load_model.
    AVAILABLE_MODELS = ['tiny', 'base', 'small', 'medium', 'large', 'large-v2', 'large-v3']
    # Language codes exposed to callers; 'auto' means let Whisper detect.
    # NOTE(review): the display-name values were lost to an encoding issue
    # upstream and are intentionally left empty here.
    SUPPORTED_LANGUAGES = {
        'auto': '',
        'zh': '',
        'en': '',
        'ja': '',
        'ko': '',
        'fr': '',
        'de': '',
        'es': '',
        'ru': '',
        'it': '',
        'pt': ''
    }

    def __init__(self, model_name: str = 'base', device: Optional[str] = None):
        """Load a Whisper model.

        Args:
            model_name: One of AVAILABLE_MODELS.
            device: 'cuda' or 'cpu'; auto-detected when omitted.

        Raises:
            ValueError: If model_name is not in AVAILABLE_MODELS
                (consistent with change_model).
        """
        # Validate here too, so __init__ and change_model enforce the
        # same contract instead of deferring to whisper's own error.
        if model_name not in self.AVAILABLE_MODELS:
            raise ValueError(f"Unsupported model: {model_name}")

        self.logger = logging.getLogger(__name__)
        self.model_name = model_name
        self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = None
        self._load_model()

    def _load_model(self):
        """Load (or reload) the Whisper model onto self.device."""
        try:
            # Lazy %-formatting so the message is only built if emitted.
            self.logger.info("Loading Whisper model: %s", self.model_name)
            self.model = whisper.load_model(self.model_name, device=self.device)
            self.logger.info("Model loaded on device: %s", self.device)
        except Exception as e:
            self.logger.error("Failed to load Whisper model: %s", e)
            raise

    def change_model(self, model_name: str):
        """Switch to a different Whisper model and reload it.

        Raises:
            ValueError: If model_name is not in AVAILABLE_MODELS.
        """
        if model_name not in self.AVAILABLE_MODELS:
            raise ValueError(f"Unsupported model: {model_name}")

        self.model_name = model_name
        self._load_model()

    def transcribe_audio(self, audio_path: str, language: Optional[str] = None,
                        progress_callback: Optional[Callable] = None) -> Dict[str, Any]:
        """Transcribe one audio file.

        Args:
            audio_path: Path to the audio file.
            language: Language code from SUPPORTED_LANGUAGES; None or
                'auto' enables Whisper's language detection.
            progress_callback: Optional callable(percent, message).
                Whisper does not report fine-grained progress, so only
                0% and 100% are emitted.

        Returns:
            Whisper's result dict ('text', 'segments', 'language', ...).

        Raises:
            Exception: Re-raises any transcription failure after logging.
        """
        try:
            self.logger.info("Transcribing audio: %s", audio_path)

            options = {
                # fp16 is only meaningful on GPU; CPU inference needs fp32.
                'fp16': self.device == 'cuda',
                'verbose': False
            }

            if language and language != 'auto':
                options['language'] = language

            if progress_callback:
                progress_callback(0, "Transcribing...")

            result = self.model.transcribe(audio_path, **options)

            if progress_callback:
                progress_callback(100, "Transcription complete")

            self.logger.info("Transcription complete")
            return result

        except Exception as e:
            self.logger.error("Transcription failed: %s", e)
            raise

    def transcribe_chunks(self, audio_chunks: List[str], language: Optional[str] = None,
                         progress_callback: Optional[Callable] = None) -> List[Dict[str, Any]]:
        """Transcribe a list of audio chunk files, best-effort.

        A chunk that fails to transcribe is logged and replaced by an
        empty result ({'text': '', 'segments': []}) so the output list
        always matches audio_chunks in length and order.

        Args:
            audio_chunks: Paths to the chunk files, in playback order.
            language: Passed through to transcribe_audio.
            progress_callback: Optional callable(percent, message).

        Returns:
            One result dict per chunk.
        """
        results = []
        total_chunks = len(audio_chunks)

        for i, chunk_path in enumerate(audio_chunks):
            try:
                if progress_callback:
                    progress = int((i / total_chunks) * 100)
                    progress_callback(progress, f"Transcribing chunk {i+1}/{total_chunks}...")

                result = self.transcribe_audio(chunk_path, language)
                results.append(result)

            except Exception as e:
                self.logger.error("Failed to transcribe chunk %s: %s", chunk_path, e)
                # Best-effort: keep going with an empty placeholder so
                # indices still line up with audio_chunks.
                results.append({'text': '', 'segments': []})

        if progress_callback:
            progress_callback(100, "All chunks transcribed")

        return results

    def merge_chunk_results(self, chunk_results: List[Dict[str, Any]],
                           chunk_duration: int = 600) -> Dict[str, Any]:
        """Merge per-chunk results into one transcript.

        Segment timestamps are shifted by chunk index * chunk_duration,
        which assumes every chunk (except possibly the last) is exactly
        chunk_duration seconds long.

        Args:
            chunk_results: Results in the same order as the chunks.
            chunk_duration: Nominal chunk length in seconds.

        Returns:
            Dict with joined 'text', offset-adjusted 'segments', and the
            'language' of the first chunk ('unknown' if unavailable).
        """
        texts = []
        merged_segments = []

        for i, result in enumerate(chunk_results):
            if not result or 'text' not in result:
                continue

            text = result['text'].strip()
            if text:
                texts.append(text)

            # Shift this chunk's segments into the full-audio timeline.
            time_offset = i * chunk_duration
            for segment in result.get('segments', []):
                adjusted_segment = segment.copy()
                adjusted_segment['start'] += time_offset
                adjusted_segment['end'] += time_offset
                merged_segments.append(adjusted_segment)

        return {
            'text': " ".join(texts),
            'segments': merged_segments,
            'language': chunk_results[0].get('language', 'unknown') if chunk_results else 'unknown'
        }

    def get_model_info(self) -> Dict[str, Any]:
        """Return the current model name, device, and CUDA availability.

        Annotated Dict[str, Any] (not Dict[str, str]) because
        'cuda_available' is a bool.
        """
        return {
            'model_name': self.model_name,
            'device': self.device,
            'cuda_available': torch.cuda.is_available()
        }