#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ASR文本智能校对脚本
使用LLM对ASR识别的文本进行校对，标注疑似识别错误的词组
"""

import os
import sys
import re
import json
import time
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Optional
from dataclasses import dataclass
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables from a local .env file at import time
# (supplies LLM_API_KEY, LLM_BASE_URL, etc. for load_llm_config below).
load_dotenv()

@dataclass
class LLMConfig:
    """Connection and sampling settings for the LLM backend."""
    api_key: str              # API key for the OpenAI-compatible endpoint
    base_url: str             # base URL of the endpoint
    model: str                # model identifier sent with each request
    max_tokens: int = 4000    # per-request cap on response tokens
    temperature: float = 0.1  # low temperature keeps proofreading deterministic

@dataclass
class TextSegment:
    """One transcript segment: a timestamp range plus its text."""
    timestamp: Optional[str]  # formatted range, e.g. "[00:01:23,456] - [00:01:25,678]"
    text: str                 # segment text content
    original_index: int       # position of the segment in the source transcript

@dataclass
class ProofreadResult:
    """One suspected recognition error reported by the LLM."""
    original_word: str        # word as recognized by ASR
    suggested_word: str       # proposed correction
    confidence: float         # LLM-reported confidence, 0.0-1.0
    reason: str              # short explanation for the suspected error
    timestamp: Optional[str] # timestamp of the chunk the error was found in

class TextProofreader:
    """ASR文本校对器"""

    def __init__(self, config: LLMConfig):
        """Create a proofreader bound to a single LLM endpoint.

        Args:
            config: connection and sampling settings for the LLM.
        """
        self.config = config
        # One reusable SDK client per proofreader instance.
        self.client = OpenAI(api_key=config.api_key, base_url=config.base_url)
        # Prompt template is fixed for the lifetime of the instance.
        self.prompt_template = self._build_prompt_template()

    def _build_prompt_template(self) -> str:
        """构建LLM提示词模板"""
        return """你是专业的语音转文字文本校对专家。请校对下面的中文ASR文本，找出识别错误。

重点检查：
- 同音字错误
- 语音相似的词
- 专业术语错误
- 上下文不连贯的地方

如果没有发现错误，返回：{{"errors": []}}

如果发现错误，返回JSON格式：
{{
    "errors": [
        {{"original": "错误词", "suggested": "正确词", "confidence": 0.8, "reason": "原因"}}
    ]
}}

只标注确实错误的地方，置信度0.0-1.0。

待校对文本：
{text_chunk}"""

    def parse_json_transcript(self, json_file_path: str) -> List[TextSegment]:
        """Parse a JSON transcript file into a list of text segments.

        Segments whose text is empty after stripping are dropped; the
        original position in the file is preserved in ``original_index``.

        Args:
            json_file_path: path to the JSON transcript file.

        Returns:
            List[TextSegment]: non-empty segments with formatted timestamps.
        """
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            payload = json.load(fh)

        parsed: List[TextSegment] = []
        for idx, raw in enumerate(payload['segments']):
            content = raw['text'].strip()
            if not content:
                # Skip empty segments but keep indexing over the full list.
                continue
            # Render as "[HH:MM:SS,mmm] - [HH:MM:SS,mmm]".
            start = self._seconds_to_timestamp(raw['start_time'])
            end = self._seconds_to_timestamp(raw['end_time'])
            parsed.append(TextSegment(f"[{start}] - [{end}]", content, idx))

        return parsed

    def _seconds_to_timestamp(self, seconds: float) -> str:
        """
        将秒数转换为 HH:MM:SS,mmm 格式

        Args:
            seconds: 秒数（浮点数）

        Returns:
            str: 格式化的时间戳
        """
        h = int(seconds // 3600)
        m = int((seconds % 3600) // 60)
        s = int(seconds % 60)
        f = int((seconds % 1) * 1000)
        return f"{h:02d}:{m:02d}:{s:02d},{f:03d}"

    def segment_text_for_llm(self, segments: List[TextSegment], max_tokens: int = 3000) -> List[List[TextSegment]]:
        """
        将文本分段，避免超出LLM token限制

        Args:
            segments: 文本段落列表
            max_tokens: 最大token数

        Returns:
            List[List[TextSegment]]: 分段后的文本列表
        """
        chunks = []
        current_chunk = []
        current_tokens = 0

        for segment in segments:
            # 粗略估算token数（中文大约1个字符=1个token）
            segment_tokens = len(segment.text) + 50  # 预留一些空间给格式化字符

            if current_tokens + segment_tokens > max_tokens and current_chunk:
                chunks.append(current_chunk)
                current_chunk = [segment]
                current_tokens = segment_tokens
            else:
                current_chunk.append(segment)
                current_tokens += segment_tokens

        if current_chunk:
            chunks.append(current_chunk)

        return chunks

    def call_llm_proofread(self, text_chunk: str, retry_count: int = 3) -> Optional[Dict]:
        """
        Ask the LLM to proofread one chunk of text.

        Retries the API call with exponential backoff on failure, and tries
        to recover a JSON object from malformed responses before giving up
        on the chunk.

        Args:
            text_chunk: concatenated segment text to proofread
            retry_count: number of attempts before giving up

        Returns:
            Dict: parsed JSON result (expected to hold an "errors" list),
            or None if every attempt failed.
        """
        prompt = self.prompt_template.format(text_chunk=text_chunk)

        for attempt in range(retry_count):
            try:
                response = self.client.chat.completions.create(
                    model=self.config.model,
                    messages=[
                        {"role": "system", "content": "你是一个专业的文本校对专家。"},
                        {"role": "user", "content": prompt}
                    ],
                    max_tokens=self.config.max_tokens,
                    temperature=self.config.temperature
                )

                result_text = response.choices[0].message.content.strip()

                # Strip markdown code-fence markers (```json ... ```) that
                # models often wrap around JSON output.
                result_text = re.sub(r'```(?:json)?\s*', '', result_text)
                result_text = re.sub(r'```\s*$', '', result_text)
                result_text = result_text.strip()

                # First try: parse the cleaned response as-is.
                try:
                    result = json.loads(result_text)
                    return result
                except json.JSONDecodeError as e:
                    print(f"警告: LLM响应JSON解析失败: {e}")
                    print(f"警告: 清理后的响应内容: {result_text[:200]}...")
                    # Fallback: extract the outermost {...} span and re-parse.
                    json_match = re.search(r'\{.*\}', result_text, re.DOTALL)
                    if json_match:
                        try:
                            result = json.loads(json_match.group())
                            return result
                        except json.JSONDecodeError as e2:
                            print(f"警告: 提取的JSON也无法解析: {e2}")
                    # Unparseable response counts as a failed attempt; the
                    # outer loop may retry with a fresh completion.
                    print(f"警告: 无法从响应中提取有效JSON，跳过此块")

            except Exception as e:
                print(f"警告: LLM调用失败 (尝试 {attempt + 1}/{retry_count}): {e}")
                if attempt < retry_count - 1:
                    time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, 4s, ...

        return None

    def proofread_segments(self, segments: List[TextSegment]) -> Tuple[List[TextSegment], List[ProofreadResult]]:
        """
        Proofread all segments chunk by chunk via the LLM.

        NOTE: the input `segments` list is mutated in place (annotated
        segments replace the originals) and the same list is returned.

        Args:
            segments: transcript segments to proofread

        Returns:
            Tuple[List[TextSegment], List[ProofreadResult]]: annotated
            segments and the flat list of findings across all chunks.
        """
        print(f"开始校对 {len(segments)} 个文本段落...")

        # Split into chunks so each LLM request stays within token limits.
        chunks = self.segment_text_for_llm(segments)
        print(f"文本分为 {len(chunks)} 个块进行处理")

        all_results = []

        for chunk_idx, chunk in enumerate(chunks):
            print(f"处理第 {chunk_idx + 1}/{len(chunks)} 块...")

            # Join the chunk's segment texts into one block for the prompt.
            chunk_text = '\n'.join([seg.text for seg in chunk])

            # Ask the LLM to proofread this chunk (None on total failure).
            llm_result = self.call_llm_proofread(chunk_text)

            if llm_result and 'errors' in llm_result:
                print(f"第 {chunk_idx + 1} 块校对成功，发现 {len(llm_result['errors'])} 个问题")
                # Apply inline 【?original→suggested?】 markers to the chunk.
                chunk_with_proofread = self._apply_proofread_results(chunk, llm_result['errors'])

                # Record each finding for the report.
                for error in llm_result['errors']:
                    result = ProofreadResult(
                        original_word=error['original'],
                        suggested_word=error['suggested'],
                        confidence=error.get('confidence', 0.5),
                        reason=error.get('reason', ''),
                        timestamp=chunk[0].timestamp  # approximation: the chunk's first timestamp stands in for every error in it
                    )
                    all_results.append(result)

                # Write annotated segments back into the caller's list.
                # NOTE(review): assumes original_index values are valid
                # indices into `segments` — true for parse_json_transcript
                # output; confirm for other callers.
                for i, segment in enumerate(chunk):
                    segments[segment.original_index] = chunk_with_proofread[i]
            else:
                print(f"第 {chunk_idx + 1} 块校对失败或无发现问题")
                if llm_result is None:
                    print(f"   原因: LLM调用失败")
                elif 'errors' not in llm_result:
                    print(f"   原因: LLM响应格式错误，缺少'errors'字段")
                    print(f"   实际响应: {llm_result}")

        return segments, all_results

    def _apply_proofread_results(self, segments: List[TextSegment], errors: List[Dict]) -> List[TextSegment]:
        """Annotate suspected errors inline in each segment's text.

        Every occurrence of an error's original word is wrapped in a
        【?original→suggested?】 marker so readers can spot it.

        Args:
            segments: segments of the chunk that was proofread.
            errors: error dicts with 'original' and 'suggested' keys.

        Returns:
            List[TextSegment]: fresh segments with annotated text; the
            input segments are left untouched.
        """
        annotated = []

        for seg in segments:
            text = seg.text
            # Apply every reported error to this segment's text.
            for err in errors:
                bad = err['original']
                good = err['suggested']
                text = text.replace(bad, f"【?{bad}→{good}?】")

            annotated.append(TextSegment(
                timestamp=seg.timestamp,
                text=text,
                original_index=seg.original_index
            ))

        return annotated

    def save_results(self, segments: List[TextSegment], results: List[ProofreadResult],
                    input_file: str, output_dir: str):
        """
        保存校对结果

        Args:
            segments: 校对后的文本段落
            results: 校对结果列表
            input_file: 输入文件路径
            output_dir: 输出目录
        """
        input_path = Path(input_file)
        output_path = Path(output_dir)
        output_path.mkdir(exist_ok=True)

        base_name = input_path.stem

        # 保存带时间戳的标注文本
        timestamped_file = output_path / f"{base_name}_proofread.txt"
        with open(timestamped_file, 'w', encoding='utf-8') as f:
            for segment in segments:
                if segment.timestamp:
                    f.write(f"{segment.timestamp} {segment.text}\n")
                else:
                    f.write(f"{segment.text}\n")

        # 保存纯标注文本
        clean_file = output_path / f"{base_name}_proofread_clean.txt"
        with open(clean_file, 'w', encoding='utf-8') as f:
            for segment in segments:
                f.write(f"{segment.text}\n")

        # 保存详细报告
        report_file = output_path / f"{base_name}_proofread_report.json"
        report_data = {
            "input_file": str(input_file),
            "total_segments": len(segments),
            "total_errors": len(results),
            "errors": [
                {
                    "timestamp": r.timestamp,
                    "original": r.original_word,
                    "suggested": r.suggested_word,
                    "confidence": r.confidence,
                    "reason": r.reason
                }
                for r in results
            ]
        }

        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report_data, f, ensure_ascii=False, indent=2)

        print(f"校对结果已保存:")
        print(f"   带时间戳标注: {timestamped_file}")
        print(f"   纯文本标注: {clean_file}")
        print(f"   详细报告: {report_file}")
        print(f"   发现 {len(results)} 个疑似错误")

def load_llm_config() -> LLMConfig:
    """Build an LLMConfig from environment variables.

    Required: LLM_API_KEY and LLM_BASE_URL. Optional with defaults:
    LLM_MODEL, LLM_MAX_TOKENS, LLM_TEMPERATURE.

    Returns:
        LLMConfig: the assembled configuration.

    Raises:
        ValueError: when a required variable is missing (or a numeric
            variable cannot be converted).
    """
    # Read everything first, then validate the required values.
    key = os.getenv('LLM_API_KEY')
    url = os.getenv('LLM_BASE_URL')
    model_name = os.getenv('LLM_MODEL', 'gpt-3.5-turbo')
    token_limit = int(os.getenv('LLM_MAX_TOKENS', '4000'))
    sampling_temp = float(os.getenv('LLM_TEMPERATURE', '0.1'))

    if not key:
        raise ValueError("未找到 LLM_API_KEY 环境变量，请检查 .env 文件")
    if not url:
        raise ValueError("未找到 LLM_BASE_URL 环境变量，请检查 .env 文件")

    return LLMConfig(api_key=key, base_url=url, model=model_name,
                     max_tokens=token_limit, temperature=sampling_temp)

def main():
    """CLI entry point: parse arguments, proofread the transcript, save results."""
    arg_parser = argparse.ArgumentParser(
        description='ASR文本智能校对工具（JSON格式）',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
使用示例:
  python scripts/proofread_text.py output/test.json
  python scripts/proofread_text.py output/test.json -o proofread_output
  python scripts/proofread_text.py output/test.json --confidence-threshold 0.8
        """
    )
    arg_parser.add_argument('input_file', help='输入的JSON转录文件路径')
    arg_parser.add_argument('-o', '--output', default='output', help='输出目录 (默认: output)')
    arg_parser.add_argument('--confidence-threshold', type=float, default=0.0,
                       help='置信度阈值，只显示高于此阈值的错误 (默认: 0.0)')
    arg_parser.add_argument('--model', help='覆盖使用的LLM模型')
    arg_parser.add_argument('--max-tokens', type=int, help='覆盖LLM最大token数')
    arg_parser.add_argument('--temperature', type=float, help='覆盖LLM温度参数')

    opts = arg_parser.parse_args()

    # Guard clauses: the input must exist and be a .json file.
    if not os.path.exists(opts.input_file):
        print(f"错误: JSON文件不存在: {opts.input_file}")
        sys.exit(1)
    if not opts.input_file.lower().endswith('.json'):
        print(f"错误: 只支持JSON格式的输入文件: {opts.input_file}")
        sys.exit(1)

    try:
        config = load_llm_config()

        # Command-line flags override values loaded from the environment.
        if opts.model:
            config.model = opts.model
        if opts.max_tokens:
            config.max_tokens = opts.max_tokens
        if opts.temperature:
            config.temperature = opts.temperature

        print(f"使用LLM配置: {config.model} (max_tokens={config.max_tokens}, temperature={config.temperature})")

        proofreader = TextProofreader(config)

        # Parse the JSON transcript into segments.
        print(f"读取JSON文件: {opts.input_file}")
        parsed_segments = proofreader.parse_json_transcript(opts.input_file)
        print(f"解析得到 {len(parsed_segments)} 个文本段落")

        # Run the LLM proofreading pass.
        annotated_segments, findings = proofreader.proofread_segments(parsed_segments)

        # Keep only findings at or above the confidence threshold.
        kept = [r for r in findings if r.confidence >= opts.confidence_threshold]
        if opts.confidence_threshold > 0:
            print(f"置信度阈值 {opts.confidence_threshold}，过滤后剩余 {len(kept)} 个错误")

        proofreader.save_results(annotated_segments, kept, opts.input_file, opts.output)

        print("校对完成！")

    except Exception as e:
        import traceback
        print(f"错误: 校对过程中发生错误: {e}")
        print("完整错误信息:")
        traceback.print_exc()
        sys.exit(1)

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()