#!/usr/bin/env python3
"""
阶段1：智能转录处理
使用FunASR进行语音识别和说话人分离
专注于高质量转录和准确的说话人分离
输出：raw_transcript.json/.md
"""

import os
import json
from pathlib import Path
from typing import Dict, List
from datetime import timedelta
from collections import defaultdict
import logging
import warnings
from funasr import AutoModel

# 优化设置
warnings.filterwarnings("ignore")
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # 避免tokenizer警告

logger = logging.getLogger(__name__)

class Stage1Processor:
    """Speech-transcription-only processor.

    Runs the FunASR pipeline (ASR + VAD + punctuation restoration +
    speaker diarization) over a single audio file and writes the
    transcript to ``<output_dir>/stage1/raw_transcript.md`` plus a
    ``.json`` twin with the same data.
    """

    def __init__(self, input_file: Path, output_dir: Path):
        """Prepare the output directory and load the FunASR model stack.

        Args:
            input_file: Path to the audio file to transcribe.
            output_dir: Directory that will receive the ``stage1`` output.
        """
        self.input_file = Path(input_file)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Minimal FunASR model initialization.
        logger.info("初始化FunASR模型...")

        self.model = AutoModel(
            model="paraformer-zh",  # main ASR model
            vad_model="fsmn-vad",   # voice-activity detection
            punc_model="ct-punc",   # punctuation restoration
            spk_model="cam++",      # speaker diarization
            disable_update=True     # skip the online model-update check
        )

        logger.info("FunASR模型初始化完成")

    def format_timestamp(self, seconds: float) -> str:
        """Render a second offset as ``H:MM:SS`` (fractions truncated)."""
        return str(timedelta(seconds=int(seconds)))

    def process(self) -> Dict:
        """Transcribe the input audio and persist the transcript.

        Returns:
            Dict with ``stage1_file`` (path to the markdown transcript)
            and ``data`` (the parsed transcript structure).

        Raises:
            FileNotFoundError: If the input audio file does not exist.
            ValueError: If FunASR returns an empty result.
        """
        logger.info(f"开始处理音频文件: {self.input_file.name}")

        if not self.input_file.exists():
            raise FileNotFoundError(f"音频文件不存在: {self.input_file}")

        # Run FunASR transcription with simplified parameters.
        logger.info("正在进行语音转录...")
        result = self.model.generate(
            str(self.input_file),
            batch_size_s=60,         # dynamic-batch budget, in seconds of audio
            sentence_timestamp=True  # request per-sentence timestamps
        )

        if not result:
            raise ValueError("语音转录结果为空")

        # Parse the first (and only) result entry.
        transcript_data = self._parse_result(result[0])

        # Persist the transcript under <output_dir>/stage1/.
        output_file = self.output_dir / "stage1" / "raw_transcript.md"
        output_file.parent.mkdir(parents=True, exist_ok=True)

        self._save_transcript(transcript_data, output_file)

        logger.info(f"语音转录完成，输出文件: {output_file}")
        return {
            "stage1_file": str(output_file),
            "data": transcript_data
        }

    def _segment_from_sentence(self, sentence: Dict) -> Dict:
        """Normalize one FunASR sentence entry into a transcript segment.

        FunASR reports sentence ``start``/``end`` offsets in milliseconds;
        convert them to seconds here so downstream duration accounting and
        ``format_timestamp`` (which expects seconds) are correct.
        """
        start_s = sentence["start"] / 1000.0
        end_s = sentence["end"] / 1000.0
        return {
            "speaker": f"Speaker_{sentence.get('spk', 0)}",
            "text": sentence["text"].strip(),
            "start_time": start_s,
            "end_time": end_s,
            "timestamp": self.format_timestamp(start_s)
        }

    def _parse_result(self, result: Dict) -> Dict:
        """Parse a FunASR result dict into the transcript structure."""
        # Both keys carry the same per-sentence schema; the two previously
        # duplicated parsing loops are collapsed into a single helper.
        if "sentence_info" in result:
            raw_sentences = result["sentence_info"]
        elif "sentences" in result:
            raw_sentences = result["sentences"]
        else:
            raw_sentences = None

        if raw_sentences is not None:
            segments = [self._segment_from_sentence(s) for s in raw_sentences]
        else:
            # No structured sentences: fall back to the plain-text field.
            segments = []
            text = result.get("text", "").strip()
            if text:
                segments.append({
                    "speaker": "Speaker_0",
                    "text": text,
                    "start_time": 0,
                    "end_time": 0,
                    # Use the shared formatter so the zero timestamp has the
                    # same "H:MM:SS" shape as every other segment.
                    "timestamp": self.format_timestamp(0)
                })

        # Drop segments whose text stripped down to nothing.
        segments = [s for s in segments if s["text"]]

        return {
            "source_file": str(self.input_file),
            "total_segments": len(segments),
            "segments": segments,
            "speaker_count": len(set(s["speaker"] for s in segments)),
            "total_duration": max((s["end_time"] for s in segments), default=0)
        }

    def _save_transcript(self, data: Dict, output_file: Path) -> None:
        """Write the transcript as Markdown plus a JSON twin next to it."""
        output_file.parent.mkdir(parents=True, exist_ok=True)

        # Markdown format: header summary, then one section per segment.
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("# 会议转录记录\n\n")
            f.write(f"**源文件**: {data['source_file']}\n")
            f.write(f"**总片段数**: {data['total_segments']}\n")
            f.write(f"**说话人数**: {data['speaker_count']}\n")
            f.write(f"**总时长**: {data['total_duration']:.1f}秒\n\n")
            f.write("---\n\n")

            # Transcript body.
            for segment in data["segments"]:
                f.write(f"## {segment['speaker']} [{segment['timestamp']}]\n\n")
                f.write(f"{segment['text']}\n\n")
                f.write("---\n\n")

        # JSON twin with the same basename.
        json_file = output_file.with_suffix('.json')
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

if __name__ == "__main__":
    import sys

    # Expect exactly one positional argument: the audio file to transcribe.
    args = sys.argv[1:]
    if len(args) != 1:
        print("用法: python stage1_raw.py <音频文件>")
        sys.exit(1)

    outcome = Stage1Processor(args[0], "../output").process()
    print(f"处理完成: {outcome['stage1_file']}")