#!/usr/bin/env python3
"""
阶段1：智能转录处理
使用FunASR进行语音识别和说话人分离
专注于高质量转录和准确的说话人分离
输出：raw_transcript.json/.md
"""

import os
import json
import sys
from pathlib import Path
from typing import Dict, List
from datetime import timedelta
from collections import defaultdict
import logging
import warnings
import gc
import subprocess

# Add the project root to sys.path so intra-project imports resolve
current_dir = Path(__file__).parent.parent.parent.parent
if str(current_dir) not in sys.path:
    sys.path.insert(0, str(current_dir))

# Make the offline-model helper (dialogue_service) importable
sys.path.append(str(current_dir / "dialogue_service"))
from local_model_cache import setup_offline_models

# Configure environment variables and the model cache for offline use.
# NOTE: must run BEFORE importing funasr so the cache paths take effect.
setup_offline_models()

from funasr import AutoModel

# Runtime tuning: silence warnings and pin BLAS/OpenMP to one thread
warnings.filterwarnings("ignore")
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # avoid tokenizer fork warnings
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'

logger = logging.getLogger(__name__)

class Stage1Processor:
    """Speech-transcription processor (stage 1).

    Runs the FunASR pipeline (timestamped ASR + VAD + punctuation +
    speaker diarization) over one audio file and writes the transcript to
    ``<output_dir>/stage1/raw_transcript.md`` and ``raw_transcript.json``.
    """

    # Files above this size (MB) are split into chunks before transcription.
    LARGE_FILE_THRESHOLD_MB = 50

    def __init__(self, input_file: Path, output_dir: Path):
        """Prepare output directories and load the offline FunASR models.

        Args:
            input_file: Audio file to transcribe.
            output_dir: Directory for stage outputs (created if missing).

        Raises:
            Exception: Propagated from FunASR when the local models cannot
                be loaded (e.g. they were never downloaded).
        """
        self.input_file = Path(input_file)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Local model directory shipped with dialogue_service (offline cache).
        self.model_dir = Path(__file__).parent.parent.parent / "models"

        logger.info("初始化FunASR模型（离线模式）...")
        try:
            # Full pipeline configuration with timestamp support.
            self.model = AutoModel(
                model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                vad_model="fsmn-vad",      # voice-activity detection / sentence split
                punc_model="ct-punc",      # punctuation restoration
                spk_model="cam++",         # speaker diarization
                disable_update=True        # never check for online updates
            )
            logger.info("✅ FunASR模型初始化完成（离线模式）")
        except Exception as e:
            logger.error(f"❌ 模型初始化失败: {e}")
            logger.error("请确保已运行: python download_models.py --download")
            raise

    def format_timestamp(self, seconds: float) -> str:
        """Format a duration in seconds as ``H:MM:SS`` (fraction dropped)."""
        return str(timedelta(seconds=int(seconds)))

    def get_audio_duration(self, file_path: Path) -> float:
        """Return the audio duration in seconds.

        Prefers ``ffprobe``; falls back to the stdlib ``wave`` module (WAV
        files only) when ffprobe is unavailable or its output is unusable.
        """
        try:
            probe = subprocess.run([
                'ffprobe', '-v', 'error', '-show_entries',
                'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1',
                str(file_path)
            ], capture_output=True, text=True)
            return float(probe.stdout.strip())
        except Exception:
            # Fallback (was a bare ``except:``): only works for WAV input.
            import wave
            with wave.open(str(file_path), 'rb') as wav_file:
                frames = wav_file.getnframes()
                rate = wav_file.getframerate()
                return frames / float(rate)

    def check_file_size(self, file_path: Path) -> bool:
        """Return True when the file is small enough for single-pass mode."""
        file_mb = file_path.stat().st_size / (1024 * 1024)
        if file_mb > self.LARGE_FILE_THRESHOLD_MB:
            logger.warning(f"文件较大 ({file_mb:.1f}MB)，将自动使用分割处理")
            return False
        return True

    def process(self) -> Dict:
        """Transcribe the input file, choosing the strategy by file size.

        Returns:
            Dict with ``stage1_file`` (path of the Markdown output) and
            ``data`` (the parsed transcript structure).

        Raises:
            FileNotFoundError: If the input audio file does not exist.
        """
        logger.info(f"开始处理音频文件: {self.input_file.name}")

        if not self.input_file.exists():
            raise FileNotFoundError(f"音频文件不存在: {self.input_file}")

        if self.check_file_size(self.input_file):
            return self._process_small_file()
        return self._process_large_file()

    def _process_small_file(self) -> Dict:
        """Transcribe the whole file in one FunASR call."""
        logger.info("使用直接处理模式")

        logger.info("正在进行语音转录...")
        result = self.model.generate(
            str(self.input_file),
            batch_size_s=60,          # dynamic batch size in seconds of audio
            sentence_timestamp=True   # emit per-sentence timestamps
        )

        if not result:
            raise ValueError("语音转录结果为空")

        transcript_data = self._parse_result(result[0])

        # _save_transcript creates the stage1 directory as needed.
        output_file = self.output_dir / "stage1" / "raw_transcript.md"
        self._save_transcript(transcript_data, output_file)

        logger.info(f"语音转录完成，输出文件: {output_file}")
        return {
            "stage1_file": str(output_file),
            "data": transcript_data
        }

    def _process_large_file(self, chunk_size: int = 900) -> Dict:
        """Transcribe a large file by splitting it into ffmpeg chunks.

        Args:
            chunk_size: Chunk length in seconds (default: 15 minutes).

        Raises:
            RuntimeError: If ffmpeg/ffprobe are not installed.
            ValueError: If no chunk could be processed successfully.
        """
        logger.info("使用分割处理模式")

        temp_dir = self.output_dir / "temp_chunks"
        temp_dir.mkdir(exist_ok=True)

        if not self._check_ffmpeg():
            logger.error("❌ 处理大文件需要ffmpeg，请先安装")
            raise RuntimeError("ffmpeg未安装，无法处理大文件")

        duration = self.get_audio_duration(self.input_file)
        logger.info(f"音频总时长: {duration:.2f}秒，开始自动分割处理...")

        all_segments = []
        start = 0
        chunk_index = 0

        while start < duration:
            end = min(start + chunk_size, duration)
            chunk_file = temp_dir / f"chunk_{chunk_index:03d}.wav"

            # Global options (-y, -loglevel) must come before -i: ffmpeg
            # treats options after the last output file as trailing and may
            # ignore them, so overwrite-without-prompt would not apply.
            # NOTE(review): '-c copy' assumes the source codec is valid in a
            # .wav container — confirm inputs are WAV/PCM.
            cmd = [
                'ffmpeg', '-y', '-loglevel', 'error',
                '-i', str(self.input_file),
                '-ss', str(start), '-t', str(end - start),
                '-c', 'copy', str(chunk_file)
            ]

            max_retries = 3
            retry_count = 0

            while retry_count < max_retries:
                try:
                    logger.info(f"处理片段 {chunk_index+1}: {start}-{end}秒 (尝试 {retry_count+1}/{max_retries})")

                    # Split with a hard timeout so a wedged ffmpeg cannot hang us.
                    subprocess.run(cmd, check=True, capture_output=True, timeout=300)
                    logger.info(f"FFmpeg分割成功，片段 {chunk_index+1}")

                    # Guard against silent ffmpeg failures producing an empty file.
                    if not chunk_file.exists() or chunk_file.stat().st_size == 0:
                        raise ValueError(f"分割后的文件无效: {chunk_file}")

                    asr_result = self.model.generate(
                        str(chunk_file),
                        batch_size_s=60,
                        sentence_timestamp=True
                    )

                    if asr_result:
                        chunk_data = self._parse_result(asr_result[0])

                        # Shift chunk-local times to the absolute file position
                        # (segment times are in milliseconds).
                        for segment in chunk_data['segments']:
                            segment['start_time'] += start * 1000
                            segment['end_time'] += start * 1000
                            segment['timestamp'] = self.format_timestamp(segment['start_time'] / 1000)
                            all_segments.append(segment)

                    gc.collect()  # release model buffers between chunks
                    break  # chunk processed — leave the retry loop

                except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
                    retry_count += 1
                    logger.error(f"片段 {chunk_index+1} 处理失败 (尝试 {retry_count}/{max_retries}): {e}")

                    if hasattr(e, 'stderr') and e.stderr:
                        logger.error(f"FFmpeg错误输出: {e.stderr.decode('utf-8', errors='ignore')}")

                    if retry_count >= max_retries:
                        logger.error(f"片段 {chunk_index+1} 达到最大重试次数，跳过此片段")
                        break
                    import time
                    time.sleep(2)  # brief back-off before retrying

                except Exception as e:
                    # Non-ffmpeg failures (e.g. ASR errors) are not retried.
                    logger.error(f"片段 {chunk_index+1} 处理出现未知错误: {e}")
                    break
                finally:
                    # Drop the temporary chunk file after every attempt.
                    if chunk_file.exists():
                        chunk_file.unlink()

            start = end
            chunk_index += 1

        if not all_segments:
            logger.error("没有成功处理任何片段")
            raise ValueError("大文件处理失败，没有成功处理任何片段")

        merged_data = {
            "source_file": str(self.input_file),
            "total_segments": len(all_segments),
            "segments": all_segments,
            "speaker_count": len(set(s["speaker"] for s in all_segments)),
            "total_duration": max((s["end_time"] for s in all_segments), default=0)
        }

        output_file = self.output_dir / "stage1" / "raw_transcript.md"
        self._save_transcript(merged_data, output_file)

        # Remove the chunk directory; a non-empty/locked dir is tolerated
        # (was a bare ``except:``).
        try:
            temp_dir.rmdir()
        except OSError:
            pass

        logger.info(f"大文件处理完成！输出文件: {output_file}")
        return {
            "stage1_file": str(output_file),
            "data": merged_data
        }

    def _check_ffmpeg(self) -> bool:
        """Return True if both ``ffmpeg`` and ``ffprobe`` are runnable."""
        try:
            subprocess.run(['ffmpeg', '-version'], capture_output=True, check=True)
            subprocess.run(['ffprobe', '-version'], capture_output=True, check=True)
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            return False

    def _parse_result(self, result: Dict) -> Dict:
        """Normalize a raw FunASR result into the transcript schema.

        Branches, in order of preference: ``sentence_info`` (diarized
        sentences), ``sentences`` (same shape under an alternate key),
        ``spk_embedding`` + ``text`` (diarization segments), and finally a
        plain ``text`` fallback with a single Speaker_0 segment.

        Returns:
            Dict with ``source_file``, ``total_segments``, ``segments``,
            ``speaker_count`` and ``total_duration`` keys.
        """
        segments = []

        # ``sentence_info`` and ``sentences`` carry the same per-sentence
        # schema; select by key PRESENCE (not truthiness) so an empty list
        # still short-circuits the later fallbacks, as before.
        sentence_key = next(
            (k for k in ("sentence_info", "sentences") if k in result), None
        )
        if sentence_key is not None:
            for sentence in result[sentence_key]:
                start_time = sentence.get("start", 0)
                end_time = sentence.get("end", 0)
                segments.append({
                    "speaker": f"Speaker_{sentence.get('spk', 0)}",
                    "text": sentence["text"].strip(),
                    "start_time": start_time,
                    "end_time": end_time,
                    # Heuristic: values > 1000 are presumably milliseconds
                    # (FunASR convention) — TODO confirm.
                    "timestamp": self.format_timestamp(
                        start_time / 1000 if start_time > 1000 else start_time
                    )
                })
        elif "spk_embedding" in result and "text" in result:
            # Diarization-only result: per-speaker segments plus full text.
            text = result.get("text", "").strip()
            spk_info = result.get("spk_embedding", [])

            if text and spk_info:
                for i, spk_seg in enumerate(spk_info):
                    if isinstance(spk_seg, dict):
                        start_time = spk_seg.get("start", i * 5000)       # default 5 s per segment
                        end_time = spk_seg.get("end", (i + 1) * 5000)
                        speaker_id = spk_seg.get("speaker", i)
                        # When per-segment text is missing, attribute the
                        # full text to the first segment only.
                        segment_text = spk_seg.get("text", text if i == 0 else "")

                        if segment_text:
                            segments.append({
                                "speaker": f"Speaker_{speaker_id}",
                                "text": segment_text.strip(),
                                "start_time": start_time,
                                "end_time": end_time,
                                "timestamp": self.format_timestamp(
                                    start_time / 1000 if start_time > 1000 else start_time
                                )
                            })
            else:
                # Fall back to a single plain-text segment.
                segments.append({
                    "speaker": "Speaker_0",
                    "text": text,
                    "start_time": 0,
                    "end_time": 0,
                    "timestamp": "00:00:00"
                })
        else:
            # No structured data at all: wrap the raw text if present.
            text = result.get("text", "").strip()
            if text:
                segments.append({
                    "speaker": "Speaker_0",
                    "text": text,
                    "start_time": 0,
                    "end_time": 0,
                    "timestamp": "00:00:00"
                })

        # Drop segments whose text ended up empty after stripping.
        segments = [s for s in segments if s["text"]]

        return {
            "source_file": str(self.input_file),
            "total_segments": len(segments),
            "segments": segments,
            "speaker_count": len(set(s["speaker"] for s in segments)),
            "total_duration": max((s.get("end_time", 0) for s in segments), default=0)
        }

    def _save_transcript(self, data: Dict, output_file: Path):
        """Write the transcript as Markdown plus a JSON sidecar file."""
        output_file.parent.mkdir(parents=True, exist_ok=True)

        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("# 会议转录记录\n\n")
            f.write(f"**源文件**: {data['source_file']}\n")
            f.write(f"**总片段数**: {data['total_segments']}\n")
            f.write(f"**说话人数**: {data['speaker_count']}\n")
            # NOTE(review): total_duration appears to be milliseconds
            # upstream but is labeled 秒 (seconds) here — confirm the unit.
            f.write(f"**总时长**: {data['total_duration']:.1f}秒\n\n")
            f.write("---\n\n")

            for segment in data["segments"]:
                f.write(f"## {segment['speaker']} [{segment['timestamp']}]\n\n")
                f.write(f"{segment['text']}\n\n")
                f.write("---\n\n")

        # JSON sidecar with the same basename for machine consumers.
        json_file = output_file.with_suffix('.json')
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)