#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ASR 服务脚本 - 支持参数化配置和进度回调
基于 superplan2.py 改进，用于与 Spring Boot 后端集成
"""

from pathlib import Path
import time
import numpy as np
import sherpa_onnx
import soundfile as sf
import librosa
from tqdm import tqdm
import os
import multiprocessing
import psutil
import json
import sys
from typing import Dict, List, Optional, Callable
from dataclasses import dataclass, asdict

try:
    import pynvml
except ImportError:
    pynvml = None
    print("警告: 未安装 pynvml 库。将无法获取 GPU 占用率和显存统计信息。")


@dataclass
class ASRConfig:
    """ASR pipeline configuration (deserialized from the job JSON's `config` section)."""
    # --- Model paths ---
    speaker_segmentation_model: str
    speaker_embedding_model: str
    asr_model_path: str
    asr_tokens_path: str
    asr_model_type: str  # one of: whisper, paraformer, telespeech, sense-voice

    # --- Whisper-specific paths (required only when asr_model_type == "whisper") ---
    whisper_encoder: Optional[str] = None
    whisper_decoder: Optional[str] = None

    # --- Speaker diarization ---
    num_speakers: int = -1  # -1 = auto-detect the number of speakers
    cluster_threshold: float = 0.7

    # --- Language ---
    language: str = "auto"  # auto, zh, yue (Cantonese); auto/yue are mapped to zh in init_asr_recognizer

    # --- Parallelism ---
    num_processes: int = 1
    num_gpus: int = 1

    # --- ASR decoding parameters ---
    num_threads: int = 2
    sample_rate: int = 16000
    decoding_method: str = "greedy_search"
    use_itn: bool = True  # inverse text normalization (only passed to the sense-voice model)
    provider: str = "cuda"  # onnxruntime execution provider: "cuda" or "cpu"


@dataclass
class ProcessProgress:
    """One progress snapshot for a file being processed, serialized for the backend."""
    task_id: str
    file_name: str
    file_index: int
    total_files: int
    stage: str  # one of: loading, diarization, recognition, completed, error
    progress: float  # percentage in [0, 100]
    message: str
    gpu_id: int = -1  # -1 when no GPU is associated with this update

    def to_json(self) -> str:
        """Serialize this snapshot as a JSON object; non-ASCII text is kept verbatim."""
        payload = asdict(self)
        return json.dumps(payload, ensure_ascii=False)


class ProgressCallback:
    """Relays progress updates to the parent process and, optionally, to an HTTP endpoint."""

    def __init__(self, task_id: str, callback_url: Optional[str] = None):
        self.task_id = task_id
        self.callback_url = callback_url

    def report(self, progress: ProcessProgress):
        """Emit *progress* on stdout (read by the Java backend) and POST it if a URL is set."""
        # The "PROGRESS:" prefix is the line marker the backend parser keys on.
        print(f"PROGRESS:{progress.to_json()}", flush=True)

        if not self.callback_url:
            return

        # Best effort only: a failed HTTP callback is logged, never raised.
        try:
            import requests
            requests.post(self.callback_url, json=asdict(progress), timeout=5)
        except Exception as e:
            print(f"进度回调失败: {e}", file=sys.stderr)


def resample_audio(audio: np.ndarray, sample_rate: int, target_sample_rate: int):
    """Resample *audio* to *target_sample_rate*.

    Returns a (samples, rate) tuple; when the rates already match, the input
    array is returned untouched.
    """
    if sample_rate == target_sample_rate:
        return audio, sample_rate
    converted = librosa.resample(audio, orig_sr=sample_rate, target_sr=target_sample_rate)
    return converted, target_sample_rate


def init_speaker_diarization(config: ASRConfig):
    """Assemble and validate the sherpa-onnx offline speaker-diarization pipeline.

    Raises RuntimeError when validation fails (typically bad model paths).
    """
    segmentation = sherpa_onnx.OfflineSpeakerSegmentationModelConfig(
        pyannote=sherpa_onnx.OfflineSpeakerSegmentationPyannoteModelConfig(
            model=config.speaker_segmentation_model
        ),
    )
    embedding = sherpa_onnx.SpeakerEmbeddingExtractorConfig(
        model=config.speaker_embedding_model
    )
    clustering = sherpa_onnx.FastClusteringConfig(
        num_clusters=config.num_speakers,  # -1 lets the clusterer choose the speaker count
        threshold=config.cluster_threshold
    )
    sd_config = sherpa_onnx.OfflineSpeakerDiarizationConfig(
        segmentation=segmentation,
        embedding=embedding,
        clustering=clustering,
        min_duration_on=0.3,   # segment post-processing threshold (presumably seconds)
        min_duration_off=0.5,  # gap-merging threshold (presumably seconds)
    )

    if not sd_config.validate():
        raise RuntimeError("说话人日志配置验证失败，请检查模型路径")

    return sherpa_onnx.OfflineSpeakerDiarization(sd_config)


def init_asr_recognizer(config: ASRConfig):
    """Create the sherpa-onnx OfflineRecognizer matching config.asr_model_type.

    Raises ValueError for an unknown model type or missing Whisper model paths.
    """
    model_type = config.asr_model_type.lower()

    # Normalize the language tag: "auto" defaults to Chinese, and Cantonese
    # ("yue") is likewise passed to the models under the "zh" tag.
    if config.language in ("auto", "yue"):
        lang = "zh"
    else:
        lang = config.language

    if model_type == "whisper":
        # Whisper ships as separate encoder/decoder ONNX files.
        if not config.whisper_encoder or not config.whisper_decoder:
            raise ValueError("Whisper 模型需要提供 encoder 和 decoder 路径")

        print(f"[ASR Init] 初始化 Whisper 模型")
        return sherpa_onnx.OfflineRecognizer.from_whisper(
            encoder=config.whisper_encoder,
            decoder=config.whisper_decoder,
            tokens=config.asr_tokens_path,
            num_threads=config.num_threads,
            decoding_method=config.decoding_method,
            debug=False,
            language=lang,
            task='transcribe',
            provider=config.provider,
        )

    if model_type == "paraformer":
        print(f"[ASR Init] 初始化 Paraformer 模型")
        return sherpa_onnx.OfflineRecognizer.from_paraformer(
            paraformer=config.asr_model_path,
            tokens=config.asr_tokens_path,
            num_threads=config.num_threads,
            sample_rate=config.sample_rate,
            decoding_method=config.decoding_method,
            debug=False,
            provider=config.provider,
        )

    if model_type == "telespeech":
        print(f"[ASR Init] 初始化 Telespeech CTC 模型")
        return sherpa_onnx.OfflineRecognizer.from_telespeech_ctc(
            model=config.asr_model_path,
            tokens=config.asr_tokens_path,
            debug=False,
            provider=config.provider
        )

    if model_type == "sense-voice":
        print(f"[ASR Init] 初始化 Sense Voice 模型")
        return sherpa_onnx.OfflineRecognizer.from_sense_voice(
            model=config.asr_model_path,
            tokens=config.asr_tokens_path,
            language=lang,
            use_itn=config.use_itn,
            debug=False,
            provider=config.provider,
        )

    raise ValueError(f"不支持的模型类型: {model_type}")


def extract_audio_segment(audio: np.ndarray, sample_rate: int, start_time: float, end_time: float) -> np.ndarray:
    """Return the slice of *audio* covering [start_time, end_time) seconds.

    Second-to-sample conversion truncates toward zero; an empty or inverted
    range yields an empty array via normal slicing semantics.
    """
    first = int(start_time * sample_rate)
    last = int(end_time * sample_rate)
    return audio[first:last]


def recognize_audio_segment(recognizer, audio_segment: np.ndarray, sample_rate: int) -> str:
    """Decode one audio chunk with *recognizer* and return the trimmed transcript."""
    asr_stream = recognizer.create_stream()
    asr_stream.accept_waveform(sample_rate, audio_segment)
    recognizer.decode_stream(asr_stream)
    return asr_stream.result.text.strip()


def process_single_audio(
    wave_filename: Path,
    gpu_id: int,
    file_idx: int,
    total_files: int,
    config: ASRConfig,
    task_id: str,
    progress_callback: Optional[ProgressCallback] = None
) -> dict:
    """Process one audio file end to end inside a worker process.

    Pipeline: load models -> read/resample audio -> speaker diarization ->
    per-segment ASR. Progress milestones (0/10/20/30/50..90/100%) are pushed
    through *progress_callback*. Returns the result dict consumed by
    save_result, or {} when model initialization or audio loading fails.
    """

    # Pin this worker to a single GPU.
    # NOTE(review): assumes no CUDA context exists yet in this process,
    # otherwise CUDA_VISIBLE_DEVICES has no effect — confirm worker start method.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

    file_name = Path(wave_filename).name

    if progress_callback:
        progress_callback.report(ProcessProgress(
            task_id=task_id,
            file_name=file_name,
            file_index=file_idx,
            total_files=total_files,
            stage="loading",
            progress=0,
            message=f"开始处理文件: {file_name}",
            gpu_id=gpu_id
        ))

    file_start_time = time.time()
    # GPU stats stay as the string 'N/A' when NVML is unavailable or sampling fails.
    gpu_util_percent = 'N/A'
    gpu_used_vram_mb = 'N/A'

    # Initialize NVML so GPU utilization/VRAM can be sampled after processing.
    handle = None
    if pynvml:
        try:
            pynvml.nvmlInit()
            # NOTE(review): NVML indexes physical devices; assumed to line up
            # with the gpu_id ordering used above — verify on multi-GPU hosts.
            handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        except Exception as e:
            print(f"[WARNING] NVML 初始化失败: {e}", file=sys.stderr)

    # Load the diarization and ASR models; abort this file on failure.
    try:
        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="loading",
                progress=10,
                message="正在加载模型...",
                gpu_id=gpu_id
            ))

        sd = init_speaker_diarization(config)
        recognizer = init_asr_recognizer(config)
    except Exception as e:
        error_msg = f"模型初始化失败: {e}"
        print(f"[ERROR] {error_msg}", file=sys.stderr)

        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="error",
                progress=0,
                message=error_msg,
                gpu_id=gpu_id
            ))

        # Best-effort NVML cleanup before bailing out.
        if pynvml and handle:
            try:
                pynvml.nvmlShutdown()
            except:
                pass
        return {}

    # Read the audio file; abort this file on failure.
    try:
        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="loading",
                progress=20,
                message="正在读取音频文件...",
                gpu_id=gpu_id
            ))

        # always_2d + channel 0 => mono, regardless of the source channel layout.
        audio, original_sample_rate = sf.read(wave_filename, dtype="float32", always_2d=True)
        audio = audio[:, 0]
        # Duration is measured at the original rate, before any resampling.
        audio_duration = len(audio) / original_sample_rate

        if original_sample_rate != config.sample_rate:
            audio, current_sample_rate = resample_audio(audio, original_sample_rate, config.sample_rate)
        else:
            current_sample_rate = original_sample_rate

    except Exception as e:
        error_msg = f"读取音频文件失败: {e}"
        print(f"[ERROR] {error_msg}", file=sys.stderr)

        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="error",
                progress=0,
                message=error_msg,
                gpu_id=gpu_id
            ))

        # Best-effort NVML cleanup before bailing out.
        if pynvml and handle:
            try:
                pynvml.nvmlShutdown()
            except:
                pass
        return {}

    # Speaker diarization, then ASR over each diarized segment.
    try:
        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="diarization",
                progress=30,
                message="正在进行说话人分割...",
                gpu_id=gpu_id
            ))

        # sort_by_start_time orders the speaker turns chronologically.
        diarization_result = sd.process(audio).sort_by_start_time()

        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="recognition",
                progress=50,
                message=f"说话人分割完成，检测到 {len(diarization_result)} 个片段",
                gpu_id=gpu_id
            ))

        all_segments = []
        total_segments = len(diarization_result)

        for i, segment in enumerate(diarization_result):
            segment_audio = extract_audio_segment(audio, current_sample_rate, segment.start, segment.end)
            # Skip zero-length slices (can occur for degenerate segment bounds).
            if segment_audio.size > 0:
                transcript = recognize_audio_segment(recognizer, segment_audio, current_sample_rate)
                all_segments.append({
                    'index': i + 1,
                    'start': segment.start,
                    'end': segment.end,
                    'speaker': f"speaker_{segment.speaker:02d}",
                    'text': transcript
                })

            # Recognition progress maps onto the 50-90% band; reported roughly
            # every 10% of the segments (max(1, ...) avoids modulo-by-zero).
            if progress_callback and i % max(1, total_segments // 10) == 0:
                progress = 50 + int((i / total_segments) * 40)
                progress_callback.report(ProcessProgress(
                    task_id=task_id,
                    file_name=file_name,
                    file_index=file_idx,
                    total_files=total_files,
                    stage="recognition",
                    progress=progress,
                    message=f"正在识别 {i+1}/{total_segments} 片段...",
                    gpu_id=gpu_id
                ))

    except Exception as e:
        error_msg = f"处理音频内容时出错: {e}"
        print(f"[ERROR] {error_msg}", file=sys.stderr)
        # NOTE(review): a mid-loop failure discards any segments recognized so
        # far; an empty transcript is still written by save_result downstream.
        all_segments = []

        if progress_callback:
            progress_callback.report(ProcessProgress(
                task_id=task_id,
                file_name=file_name,
                file_index=file_idx,
                total_files=total_files,
                stage="error",
                progress=0,
                message=error_msg,
                gpu_id=gpu_id
            ))

    all_segments.sort(key=lambda x: x['start'])
    file_end_time = time.time()
    file_processing_time = file_end_time - file_start_time

    # Sample this worker's resource usage (cpu_percent blocks ~0.1 s).
    process = psutil.Process(os.getpid())
    cpu_percent = process.cpu_percent(interval=0.1)
    memory_mb = process.memory_info().rss / (1024 ** 2)

    # Sample GPU utilization/VRAM once, then release NVML.
    if pynvml and handle:
        try:
            gpu_util_info = pynvml.nvmlDeviceGetUtilizationRates(handle)
            gpu_memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            gpu_util_percent = gpu_util_info.gpu
            gpu_used_vram_mb = gpu_memory_info.used / (1024 ** 2)
        except Exception as e:
            print(f"[WARNING] NVML 读取统计信息失败: {e}", file=sys.stderr)
        finally:
            try:
                pynvml.nvmlShutdown()
            except:
                pass

    # Report completion before returning the aggregate result.
    if progress_callback:
        progress_callback.report(ProcessProgress(
            task_id=task_id,
            file_name=file_name,
            file_index=file_idx,
            total_files=total_files,
            stage="completed",
            progress=100,
            message=f"处理完成，用时 {file_processing_time:.2f}秒",
            gpu_id=gpu_id
        ))

    # Result dict consumed by save_result / process_audio_files.
    return {
        'file_idx': file_idx,
        'total_files': total_files,
        'wave_filename': str(wave_filename),
        'all_segments': all_segments,
        'audio_duration': audio_duration,
        'file_processing_time': file_processing_time,
        'cpu_percent': cpu_percent,
        'memory_mb': memory_mb,
        'gpu_id': gpu_id,
        'gpu_util_percent': gpu_util_percent,
        'gpu_used_vram_mb': gpu_used_vram_mb,
    }


def save_result(result_data: dict, output_dir: Path):
    """Write one file's transcription result to a UTF-8 text report.

    Args:
        result_data: non-empty result dict produced by process_single_audio.
        output_dir: directory for the report file; created if missing.

    Returns:
        The written file path as a str, or None when writing fails.
    """
    wave_filename = Path(result_data['wave_filename'])
    output_dir.mkdir(parents=True, exist_ok=True)
    output_filename = output_dir / f"transcript_{wave_filename.stem}_gpu{result_data['gpu_id']}.txt"

    try:
        with open(output_filename, 'w', encoding='utf-8') as f:
            f.write(f"文件: {wave_filename}\n")
            f.write(f"GPU ID: {result_data['gpu_id']}\n")
            f.write(f"音频时长: {result_data['audio_duration']:.2f}秒\n")
            f.write(f"处理时间: {result_data['file_processing_time']:.2f}秒\n")
            # Fix: guard the RTF division — a zero-length audio file used to
            # raise ZeroDivisionError here, aborting the whole report.
            if result_data['audio_duration'] > 0:
                rtf = result_data['file_processing_time'] / result_data['audio_duration']
                f.write(f"RTF: {rtf:.3f}\n")
            else:
                f.write("RTF: N/A\n")
            f.write(f"进程CPU占用: {result_data['cpu_percent']:.2f}%\n")
            f.write(f"进程内存占用: {result_data['memory_mb']:.2f} MB\n")

            # GPU stats are only written when NVML sampling succeeded; both
            # fields are set together in process_single_audio.
            if result_data['gpu_util_percent'] != 'N/A':
                f.write(f"GPU {result_data['gpu_id']} 占用率: {result_data['gpu_util_percent']}%\n")
                f.write(f"GPU {result_data['gpu_id']} VRAM: {result_data['gpu_used_vram_mb']:.2f} MB\n")

            f.write(f"\n转录结果:\n")
            for segment in result_data['all_segments']:
                f.write(f"{segment['start']:6.2f}-{segment['end']:6.2f}s | {segment['speaker']}: {segment['text']}\n")

        return str(output_filename)
    except Exception as e:
        # Best effort: log and signal failure to the caller instead of raising.
        print(f"[ERROR] 保存结果文件失败: {e}", file=sys.stderr)
        return None


def process_audio_files(
    audio_files: List[Path],
    output_dir: Path,
    config: ASRConfig,
    task_id: str,
    progress_callback: Optional[ProgressCallback] = None
) -> dict:
    """Fan the audio files out over a process pool and collect per-file results.

    Files are assigned to GPUs round-robin. Returns a summary dict: success
    flag, counts, total audio duration, wall-clock time, overall RTF, written
    report paths, and the raw per-file results.
    """
    total_files = len(audio_files)

    # Round-robin GPU assignment; file index is 1-based for display.
    tasks = [
        (path, idx % config.num_gpus, idx + 1, total_files, config, task_id, progress_callback)
        for idx, path in enumerate(audio_files)
    ]

    started = time.time()
    all_results = []

    try:
        with multiprocessing.Pool(processes=config.num_processes) as pool:
            all_results = pool.starmap(process_single_audio, tasks)

        elapsed = time.time() - started

        # Persist every non-empty result; failed files come back as {}.
        saved_files = []
        for result in all_results:
            if not result:
                continue
            written = save_result(result, output_dir)
            if written:
                saved_files.append(written)

        total_audio_duration = sum(r.get('audio_duration', 0) for r in all_results if r)
        overall_rtf = elapsed / total_audio_duration if total_audio_duration > 0 else 0

        return {
            'success': True,
            'total_files': total_files,
            'processed_files': len([r for r in all_results if r]),
            'total_duration': total_audio_duration,
            'processing_time': elapsed,
            'overall_rtf': overall_rtf,
            'output_files': saved_files,
            'results': all_results,
        }

    except Exception as e:
        error_msg = f"处理过程中发生错误: {e}"
        print(f"[ERROR] {error_msg}", file=sys.stderr)
        return {
            'success': False,
            'error': error_msg
        }


def main():
    """Entry point: load the JSON job description named on the command line and run it.

    Emits a final "RESULT:<json>" line on stdout for the backend and exits 0
    on success, 1 on any failure.
    """
    if len(sys.argv) < 2:
        print("用法: python asr_service.py <config.json>")
        sys.exit(1)

    config_path = sys.argv[1]

    try:
        with open(config_path, 'r', encoding='utf-8') as fh:
            payload = json.load(fh)

        # Deserialize the job description.
        asr_config = ASRConfig(**payload['config'])
        files = [Path(p) for p in payload['audio_files']]
        out_dir = Path(payload['output_dir'])
        task_id = payload.get('task_id', 'default')

        # Progress updates go to stdout and, optionally, an HTTP callback.
        callback = ProgressCallback(task_id, payload.get('callback_url'))

        summary = process_audio_files(files, out_dir, asr_config, task_id, callback)

        # The "RESULT:" prefix is the line marker the backend parser keys on.
        print(f"RESULT:{json.dumps(summary, ensure_ascii=False)}", flush=True)

        sys.exit(0 if summary['success'] else 1)

    except Exception as e:
        failure = {
            'success': False,
            'error': str(e)
        }
        print(f"RESULT:{json.dumps(failure, ensure_ascii=False)}", flush=True)
        sys.exit(1)


# Script entry point; the job configuration comes from the JSON file on the CLI.
if __name__ == "__main__":
    main()
