#!/usr/bin/env python3
"""
统一的会议转PRD主程序
自动适配文件大小，智能选择处理方式
"""

import os
import sys
from pathlib import Path

# 添加父目录(FunASR)到Python路径，以便导入funasr模块
current_dir = Path(__file__).parent
parent_dir = current_dir.parent
if str(parent_dir) not in sys.path:
    sys.path.insert(0, str(parent_dir))

import gc
import json
import warnings
import logging
import subprocess
from datetime import timedelta
import multiprocessing

# 配置日志和警告
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# 设置环境变量防止段错误
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'

def check_dependencies():
    """Check that the external tools required for large-file processing
    (ffmpeg and ffprobe) are installed and runnable.

    Returns:
        bool: True if every tool ran successfully, False if any is missing.
    """
    dependencies = {
        'ffmpeg': ['ffmpeg', '-version'],
        'ffprobe': ['ffprobe', '-version']
    }

    all_ok = True
    for name, cmd in dependencies.items():
        try:
            subprocess.run(cmd, capture_output=True, check=True)
            logger.info(f"✅ {name} 已安装")
        except (subprocess.CalledProcessError, FileNotFoundError):
            # Keep checking the remaining tools so every missing one is
            # reported, instead of returning on the first failure.
            logger.warning(f"⚠️  {name} 未安装，大文件处理可能受限")
            all_ok = False
    return all_ok

def get_audio_duration(file_path):
    """Return the duration of an audio file in seconds.

    Tries ffprobe first; if that fails (tool missing, or its output is not
    a parsable float), falls back to reading the file as WAV with the
    stdlib ``wave`` module.

    Args:
        file_path: path to the audio file (str or Path).

    Returns:
        float: duration in seconds.

    Raises:
        wave.Error / OSError: if ffprobe fails and the file is not a
            readable WAV either.
    """
    try:
        result = subprocess.run([
            'ffprobe', '-v', 'error', '-show_entries',
            'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1',
            str(file_path)
        ], capture_output=True, text=True)
        return float(result.stdout.strip())
    except (OSError, ValueError, subprocess.SubprocessError):
        # Narrowed from a bare except: only fall back when ffprobe is
        # unavailable (FileNotFoundError is an OSError) or produced
        # unparsable output (ValueError from float()).
        import wave
        with wave.open(str(file_path), 'rb') as wav_file:
            frames = wav_file.getnframes()
            rate = wav_file.getframerate()
            return frames / float(rate)

def check_file_size(file_path):
    """Return True when the file is small enough for direct processing.

    Files over 50MB are expected to go through the chunked pipeline;
    a warning is logged for them and False is returned.
    """
    size_mb = Path(file_path).stat().st_size / (1024 * 1024)
    small_enough = size_mb <= 50
    if not small_enough:
        logger.warning(f"文件较大 ({size_mb:.1f}MB)，将自动使用分割处理")
    return small_enough

def split_and_process_large_file(input_file: str, output_dir: str, chunk_size: int = 900):
    """Split a large audio file into chunks, transcribe each chunk, merge
    the transcripts, then run the annotation and report stages.

    Args:
        input_file: path to the source audio file.
        output_dir: directory that receives all pipeline output.
        chunk_size: chunk length in seconds (default 900 = 15 minutes).

    Returns:
        The final PRD path produced by stage 3, or None on failure.
    """
    try:
        from stages.stage1_raw import Stage1Processor
        from stages.stage2_annotate import Stage2Processor
        from stages.stage3_report import Stage3ReportGenerator

        input_path = Path(input_file)
        output_path = Path(output_dir)
        temp_dir = output_path / "temp_chunks"
        # parents=True so a not-yet-created output_dir does not abort the run.
        temp_dir.mkdir(parents=True, exist_ok=True)

        # Chunked processing relies on ffmpeg/ffprobe being installed.
        has_ffmpeg = check_dependencies()
        if not has_ffmpeg:
            logger.error("❌ 处理大文件需要ffmpeg，请先安装: brew install ffmpeg")
            return None

        # Total duration drives the chunking loop below.
        duration = get_audio_duration(input_file)
        logger.info(f"音频总时长: {duration:.2f}秒，开始自动分割处理...")

        all_segments = []
        start = 0
        chunk_index = 0

        while start < duration:
            end = min(start + chunk_size, duration)
            chunk_file = temp_dir / f"chunk_{chunk_index:03d}.wav"

            # Extract [start, end) without re-encoding (-c copy).
            cmd = [
                'ffmpeg', '-i', str(input_file),
                '-ss', str(start), '-t', str(end - start),
                '-c', 'copy', str(chunk_file),
                '-y', '-loglevel', 'error'
            ]

            try:
                subprocess.run(cmd, check=True, capture_output=True)
                logger.info(f"处理片段 {chunk_index+1}: {start}-{end}秒")

                # Transcribe this single chunk.
                stage1 = Stage1Processor(chunk_file, output_path)
                stage1_result = stage1.process()

                # Shift chunk-local timestamps to absolute file time.
                # NOTE(review): assumes segment start/end times are in
                # milliseconds — confirm against Stage1Processor.
                offset_ms = start * 1000
                for segment in stage1_result['data']['segments']:
                    segment['start_time'] += offset_ms
                    segment['end_time'] += offset_ms
                    segment['timestamp'] = str(timedelta(seconds=int(segment['start_time']/1000)))
                    all_segments.append(segment)

                gc.collect()

            except subprocess.CalledProcessError as e:
                logger.error(f"分割失败: {e}")
                break
            finally:
                # Always remove the temp chunk, even when processing failed.
                if chunk_file.exists():
                    chunk_file.unlink()

            start = end
            chunk_index += 1

        if not all_segments:
            logger.error("没有成功处理任何片段")
            return None

        # Merge all chunk transcripts for the downstream stages.
        merged_data = {
            "source_file": str(input_path),
            "total_segments": len(all_segments),
            "segments": all_segments
        }

        # Write the merged transcript as Markdown.
        merged_file = output_path / "stage1" / "raw_transcript.md"
        merged_file.parent.mkdir(parents=True, exist_ok=True)

        with open(merged_file, 'w', encoding='utf-8') as f:
            f.write("# 会议原始转录（合并版）\n\n")
            f.write(f"**源文件**: {merged_data['source_file']}\n")
            f.write(f"**总片段数**: {merged_data['total_segments']}\n\n")
            f.write("---\n\n")

            for segment in merged_data["segments"]:
                f.write(f"## {segment['speaker']} [{segment['timestamp']}]\n")
                f.write(f"{segment['text']}\n\n")

        # Also persist the merged transcript as JSON next to the Markdown.
        json_file = merged_file.with_suffix('.json')
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(merged_data, f, ensure_ascii=False, indent=2)

        # Hand the merged result to the remaining stages.
        stage1_result = {"stage1_file": str(merged_file), "data": merged_data}

        # Stage 2: annotation.
        logger.info("开始人工标注...")
        stage2 = Stage2Processor(stage1_result, output_path)
        stage2_result = stage2.process()

        gc.collect()

        # Stage 3: report generation.
        logger.info("开始报告生成...")
        stage3 = Stage3ReportGenerator(stage2_result, output_path)
        final_prd = stage3.process()

        # Best-effort cleanup; rmdir raises OSError (harmlessly ignored)
        # if any chunk files remain.  Narrowed from a bare except.
        try:
            temp_dir.rmdir()
        except OSError:
            pass

        logger.info(f"大文件处理完成！最终PRD: {final_prd}")
        return final_prd

    except Exception as e:
        logger.error(f"大文件处理失败: {e}")
        import traceback
        traceback.print_exc()
        return None

def process_with_timeout(input_file: str, output_dir: str = "dialogue_process/output", timeout: int = 600):
    """Run the three-stage pipeline (transcribe -> annotate -> report)
    directly on a small audio file, without chunking.

    Args:
        input_file: path to the audio file.
        output_dir: directory that receives all pipeline output.
        timeout: intended timeout in seconds.
            NOTE(review): despite the function name, `timeout` is accepted
            but never applied anywhere in this body — confirm whether a
            real timeout mechanism is still required.

    Returns:
        The final PRD path produced by stage 3, or None on failure.
    """
    from stages.stage1_raw import Stage1Processor
    from stages.stage2_annotate import Stage2Processor
    from stages.stage3_report import Stage3ReportGenerator
    
    input_path = Path(input_file)
    output_path = Path(output_dir)
    
    try:
        # Stage 1: raw transcription.
        logger.info(f"开始处理: {input_path}")
        stage1 = Stage1Processor(input_path, output_path)
        stage1_result = stage1.process()
        
        # Free transcription memory before the next stage.
        gc.collect()
        
        # Stage 2: annotation (automatic mode).
        logger.info("开始人工标注...")
        stage2 = Stage2Processor(stage1_result, output_path)
        stage2_result = stage2.process()
        
        gc.collect()
        
        # Stage 3: report generation.
        logger.info("开始报告生成...")
        stage3 = Stage3ReportGenerator(stage2_result, output_path)
        final_prd = stage3.process()
        
        logger.info(f"处理完成！最终PRD: {final_prd}")
        return final_prd
        
    except Exception as e:
        logger.error(f"处理失败: {e}")
        import traceback
        traceback.print_exc()
        return None

def main():
    """CLI entry point: parse arguments, then dispatch to the direct or
    chunked pipeline based on the input file's size (50MB threshold)."""
    import argparse

    parser = argparse.ArgumentParser(description="统一的会议录音转PRD工具 - 自动适配文件大小")
    parser.add_argument("input", help="输入音频文件路径")
    parser.add_argument("--output", default="dialogue_process/output", help="输出目录")
    parser.add_argument("--timeout", type=int, default=600, help="超时时间（秒）")
    parser.add_argument("--chunk-size", type=int, default=300, help="分段大小（秒）")
    args = parser.parse_args()

    if not os.path.exists(args.input):
        logger.error(f"输入文件不存在: {args.input}")
        sys.exit(1)

    # Dispatch on size: files over 50MB go through the chunked pipeline.
    size_mb = Path(args.input).stat().st_size / (1024 * 1024)

    if size_mb > 50:
        logger.info(f"检测到文件较大 ({size_mb:.1f}MB)，自动使用分割处理模式")
        result = split_and_process_large_file(args.input, args.output, args.chunk_size)
    else:
        logger.info(f"文件较小 ({size_mb:.1f}MB)，使用直接处理模式")
        result = process_with_timeout(args.input, args.output, args.timeout)

    if result:
        print(f"✅ 处理成功！PRD文件: {result}")
        return
    print("❌ 处理失败")
    sys.exit(1)

if __name__ == "__main__":
    # On Windows, explicitly select the 'spawn' multiprocessing start
    # method before any worker processes are created.
    if sys.platform.startswith('win'):
        multiprocessing.set_start_method('spawn')
    
    main()