#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
转录质量评估和比较工具
比较不同转录方法的结果质量
"""

import os
import sys
import argparse
import json
import librosa
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd

class TranscriptionEvaluator:
    """Evaluates and compares the quality of speech-transcription results.

    Supports whisper-style result dicts (``{'segments': [...]}``) and plain
    lists of segments, computes timestamp and text-quality metrics, and can
    compare several transcription methods, persisting a JSON report.
    """

    def __init__(self):
        # Scratch space for metrics; not used by the public methods yet but
        # kept for interface compatibility.
        self.metrics = {}

    def load_transcription_results(self, file_path):
        """Load transcription results from *file_path*.

        Two formats are supported:
          * ``.json`` files — returned as the parsed object, unchanged;
          * timestamped text files with ``[HH:MM:SS,mmm] text`` (or
            ``[MM:SS,mmm]``) lines — returned as a list of
            ``{'time': seconds, 'text': str}`` dicts.

        Returns ``None`` when the file cannot be read or parsed.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                # str() so pathlib.Path arguments are accepted as well.
                if str(file_path).endswith('.json'):
                    return json.load(f)

                # Parse timestamped text lines, e.g. "[00:01:23,456] text".
                results = []
                for line in f:
                    if not (line.strip() and '[' in line and ']' in line):
                        continue
                    time_part = line[line.find('[') + 1:line.find(']')]
                    text_part = line[line.find(']') + 1:].strip()

                    # Comma is the SRT-style millisecond separator.
                    parts = time_part.replace(',', '.').split(':')
                    if len(parts) == 3:
                        h, m, s = map(float, parts)
                    elif len(parts) == 2:
                        # Tolerate MM:SS timestamps (no hour field).
                        h = 0.0
                        m, s = map(float, parts)
                    else:
                        # Caught below: the whole file is reported as unreadable.
                        raise ValueError(f"unrecognized timestamp: {time_part!r}")

                    results.append({
                        'time': h * 3600 + m * 60 + s,
                        'text': text_part,
                    })
                return results
        except Exception as e:
            print(f"加载文件失败 {file_path}: {e}")
            return None

    def evaluate_timestamp_accuracy(self, results, audio_path):
        """Compute timestamp statistics of *results* against the audio file.

        Returns a metrics dict, or ``{"error": ...}`` on any failure.
        """
        print(f"🔍 评估时间戳准确性: {audio_path}")

        try:
            # Only the total duration is needed; 16 kHz resampling does not
            # change it.  (librosa is imported at module level.)
            audio, sr = librosa.load(audio_path, sr=16000)
            total_duration = len(audio) / sr

            if not results:
                return {"error": "无转录结果"}

            # Accept a whisper-style dict with 'segments' or a bare list.
            if isinstance(results, dict) and 'segments' in results:
                segments = results['segments']
            elif isinstance(results, list):
                segments = results
            else:
                return {"error": "结果格式不支持"}

            if not segments:
                return {"error": "无有效段"}

            # Collect segment start times and (where available) durations.
            timestamps = []
            durations = []
            for seg in segments:
                if 'start' in seg and 'end' in seg:
                    timestamps.append(seg['start'])
                    durations.append(seg['end'] - seg['start'])
                elif 'time' in seg:
                    # Point-timestamp format: start time only, no duration.
                    timestamps.append(seg['time'])

            if not timestamps:
                return {"error": "无时间戳数据"}

            # Gaps between consecutive segment starts (input order assumed).
            timestamp_gaps = [timestamps[i] - timestamps[i - 1]
                              for i in range(1, len(timestamps))]

            # float() conversions keep every value JSON-serializable: numpy
            # integer scalars are rejected by json.dump later on.
            metrics = {
                "total_segments": len(segments),
                "total_duration": float(total_duration),
                "avg_segment_duration": float(np.mean(durations)) if durations else 0,
                "min_segment_duration": float(np.min(durations)) if durations else 0,
                "max_segment_duration": float(np.max(durations)) if durations else 0,
                "avg_gap_between_segments": float(np.mean(timestamp_gaps)) if timestamp_gaps else 0,
                # Fraction of the audio reached by the last segment start;
                # guard against zero-length audio.
                "timestamp_coverage": timestamps[-1] / total_duration if total_duration > 0 else 0,
                "segments_per_minute": len(segments) / (total_duration / 60) if total_duration > 0 else 0,
            }

            return metrics

        except Exception as e:
            return {"error": f"时间戳评估失败: {e}"}

    def evaluate_text_quality(self, results):
        """Compute text statistics (lengths, filler-word ratio) of *results*.

        Returns a metrics dict, or ``{"error": ...}`` on any failure.
        """
        print("📝 评估文本质量...")

        try:
            if isinstance(results, dict) and 'segments' in results:
                segments = results['segments']
            elif isinstance(results, list):
                segments = results
            else:
                return {"error": "结果格式不支持"}

            # Gather all segment texts.
            texts = [seg['text'] for seg in segments if 'text' in seg]
            if not texts:
                return {"error": "无文本数据"}

            total_chars = sum(len(text) for text in texts)
            total_words = sum(len(text.split()) for text in texts)

            # Common Mandarin filler words / discourse markers.
            filler_words = ['嗯', '啊', '呃', '这个', '那个', '就是', '然后', '所以']
            filler_count = sum(text.count(filler)
                               for text in texts for filler in filler_words)

            sentence_lengths = [len(text) for text in texts]

            # int()/float() keep the dict JSON-serializable: np.min/np.max on
            # a list of ints return np.int64, which json.dump rejects.
            metrics = {
                "total_segments": len(segments),
                "total_characters": total_chars,
                "total_words": total_words,
                "avg_chars_per_segment": float(np.mean(sentence_lengths)),
                "min_chars_per_segment": int(np.min(sentence_lengths)),
                "max_chars_per_segment": int(np.max(sentence_lengths)),
                "filler_word_count": filler_count,
                "filler_word_ratio": filler_count / total_words if total_words > 0 else 0,
                "avg_words_per_segment": total_words / len(segments) if segments else 0,
            }

            return metrics

        except Exception as e:
            return {"error": f"文本质量评估失败: {e}"}

    def compare_methods(self, results_dict, audio_path, output_dir="output"):
        """Evaluate every method in *results_dict* and print/save a comparison.

        Parameters
        ----------
        results_dict : dict mapping method name -> loaded transcription results
        audio_path : path to the original audio file
        output_dir : directory the JSON report is written to (default "output")

        Returns the per-method comparison dict.
        """
        print("🔄 比较转录方法...")

        comparison = {}

        for method_name, results in results_dict.items():
            print(f"\n📊 分析方法: {method_name}")

            # Timestamp accuracy.
            timestamp_metrics = self.evaluate_timestamp_accuracy(results, audio_path)
            if "error" not in timestamp_metrics:
                print(f"  ✅ 时间戳评估完成")
                print(f"     总段数: {timestamp_metrics['total_segments']}")
                print(f"     平均段长: {timestamp_metrics['avg_segment_duration']:.2f}秒")
                print(f"     段/分钟: {timestamp_metrics['segments_per_minute']:.1f}")

            # Text quality.
            text_metrics = self.evaluate_text_quality(results)
            if "error" not in text_metrics:
                print(f"  ✅ 文本质量评估完成")
                print(f"     总字符数: {text_metrics['total_characters']}")
                print(f"     填充词比例: {text_metrics['filler_word_ratio']:.2%}")

            comparison[method_name] = {
                "timestamp_metrics": timestamp_metrics,
                "text_metrics": text_metrics,
            }

        # Print the summary and persist the detailed report.
        self._generate_comparison_report(comparison, output_dir)

        return comparison

    def _generate_comparison_report(self, comparison, output_dir="output"):
        """Print a summary comparison and save the detailed JSON report."""
        print("\n" + "="*60)
        print("📈 转录方法比较报告")
        print("="*60)

        methods = list(comparison.keys())

        # Recommendations only make sense with at least two methods.
        if len(methods) >= 2:
            print(f"\n🏆 最佳方法推荐:")

            best_coverage = None
            best_coverage_method = ""
            best_duration_balance = None
            best_duration_method = ""

            for method in methods:
                tm = comparison[method]["timestamp_metrics"]
                if "error" in tm:
                    continue
                coverage = tm.get("timestamp_coverage", 0)
                avg_duration = tm.get("avg_segment_duration", 0)

                if best_coverage is None or coverage > best_coverage:
                    best_coverage = coverage
                    best_coverage_method = method

                # Ideal segment length is 3-8 s; score by distance from the
                # 5.5 s midpoint.  max(..., eps) avoids a ZeroDivisionError
                # when avg_duration is exactly 5.5.
                if avg_duration > 0:
                    duration_score = 1.0 / max(abs(avg_duration - 5.5), 1e-9)
                else:
                    duration_score = 0
                if best_duration_balance is None or duration_score > best_duration_balance:
                    best_duration_balance = duration_score
                    best_duration_method = method

            # Every method may have failed evaluation; formatting None with
            # :.1% would raise, so only print when a winner exists.
            if best_coverage is not None:
                print(f"  📡 最高时间戳覆盖率: {best_coverage_method} ({best_coverage:.1%})")
                print(f"  ⚖️ 最佳段长平衡: {best_duration_method}")

        # Persist the full metrics.
        self._save_detailed_report(comparison, output_dir)

    def _save_detailed_report(self, comparison, output_dir="output"):
        """Write the full metrics comparison to a JSON report in *output_dir*."""
        report = {
            "evaluation_summary": "转录方法质量比较报告",
            "methods_compared": list(comparison.keys()),
            "detailed_metrics": comparison,
        }

        report_path = os.path.join(output_dir, "quality_comparison_report.json")
        os.makedirs(output_dir, exist_ok=True)

        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)

        print(f"\n📋 详细报告已保存: {report_path}")

def main():
    """CLI entry point: load transcription result files and compare their quality.

    Positional args: the original audio file and one or more transcription
    result files.  Exits with status 1 when no result file could be loaded.
    """
    parser = argparse.ArgumentParser(description="转录质量评估和比较工具")
    parser.add_argument("audio_file", help="原始音频文件路径")
    parser.add_argument("result_files", nargs="+", help="转录结果文件路径（可多个）")
    # NOTE(review): --output is accepted but the evaluator currently writes
    # its report to its default "output" directory — confirm before relying
    # on this flag.
    parser.add_argument("--output", "-o", default="output", help="输出目录")

    args = parser.parse_args()

    print("=" * 60)
    print("🔍 转录质量评估和比较工具")
    print("=" * 60)

    evaluator = TranscriptionEvaluator()

    # Load every result file, keyed by a method name derived from the file
    # stem.  Duplicate stems get a numeric suffix so no result is silently
    # overwritten.
    results_dict = {}
    for result_file in args.result_files:
        if not os.path.exists(result_file):
            print(f"⚠️ 文件不存在: {result_file}")
            continue

        stem = Path(result_file).stem
        method_name = stem
        suffix = 2
        while method_name in results_dict:
            method_name = f"{stem}_{suffix}"
            suffix += 1

        print(f"\n📂 加载结果文件: {result_file}")
        results = evaluator.load_transcription_results(result_file)
        if results:
            results_dict[method_name] = results
            print(f"  ✅ 加载成功: {method_name}")
        else:
            print(f"  ❌ 加载失败: {method_name}")

    if not results_dict:
        print("❌ 没有有效的结果文件")
        sys.exit(1)

    # Run the comparison; the evaluator prints its own summary.
    evaluator.compare_methods(results_dict, args.audio_file)

    print(f"\n✅ 质量评估完成！")
    print(f"📁 报告保存在: {args.output}")

if __name__ == "__main__":
    main()