from openai import OpenAI
import json
import os
import datetime
from typing import Dict, List, Optional, Tuple
import logging
from dataclasses import dataclass, asdict
import pandas as pd
from tqdm import tqdm
import asyncio
import aiohttp
import threading
from queue import Queue

# Taxonomy of document-edit labels referenced by both evaluation prompts.
# Each entry carries: label_id (1-22), type (label name), detail (description
# with concrete examples), and unit (the granularity at which this kind of
# edit typically occurs: sentence / paragraph / code block / file / ...).
# NOTE: the Chinese strings are runtime prompt content fed to the model —
# they must not be translated or reformatted.
DOCUMENT_EDIT_LABELS = [
    # ===== Spelling =====
    {"label_id": 1, "type": "拼写-英文专业名词大小写", "detail": "英文专业术语、单位、品牌名等的大小写不规范，需要统一为标准形式，如cpu→CPU、api→API、github→GitHub、8gb→8GB、Install.md→install.md", "unit": "句子"},
    {"label_id": 2, "type": "拼写-英文同词大小写一致", "detail": "同一章节内相同的非专业英文单词大小写不一致，需要统一为同一形式，如test/Test/TEST在同一章节内应统一为test", "unit": "章节"},
    {"label_id": 3, "type": "拼写-中文相似字错误", "detail": "中文词汇使用了同音字或形近字，需要替换为正确汉字，如区块连→区块链、阀值→阈值、布署→部署", "unit": "句子"},
    {"label_id": 4, "type": "拼写-中文误触多余字", "detail": "因键盘误操作产生的重复字符，需要删除多余字符，如安装包包→安装包、打开开文档→打开文档", "unit": "句子"},

    # ===== Hyperlinks =====
    {"label_id": 5, "type": "超链接-描述与格式错误", "detail": "超链接的文字描述与目标不符、格式不规范或地址错误，需要更正链接描述、补充格式标记或修正地址", "unit": "超链接"},

    # ===== Spacing =====
    {"label_id": 6, "type": "空格-中英文与标点空格错误", "detail": "空格使用不规范，包括纯中文间的多余空格、中英文混排时英文前后缺少空格、中英文标点前后空格不当等问题", "unit": "句子/代码行"},

    # ===== Punctuation =====
    {"label_id": 7, "type": "标点-中英文标点使用错误", "detail": "中英文标点符号混用，如英文代码中使用中文标点、中文文本中使用英文标点，需要统一标点类型", "unit": "代码行/句子"},
    {"label_id": 8, "type": "标点-顿号使用错误", "detail": "顿号使用场景不当，应该仅在句子内并列词语间使用顿号，其他场景需改用适当标点", "unit": "句子"},
    {"label_id": 9, "type": "标点-成对标点缺失", "detail": "成对标点符号缺少一侧，需要补齐缺失的引号、括号、书名号等", "unit": "句子/段落"},
    {"label_id": 10, "type": "标点-同级内容标点一致", "detail": "同级内容的标点使用不统一，需要调整为相同的标点规则，确保段落结尾、列表项等标点保持一致", "unit": "段落/表格/列表"},

    # ===== Formatting & layout =====
    {"label_id": 11, "type": "格式-换行不一致", "detail": "同级内容的换行格式不统一，需要调整为相同的换行逻辑和缩进规范", "unit": "段落"},
    {"label_id": 12, "type": "版式-视觉排版优化", "detail": "文档中存在影响视觉一致性和阅读体验的排版问题，包括图片位置不当（如遮挡文字、偏离语义位置）、行间距/段落间距不统一、对齐方式混乱等，需调整布局以提升可读性", "unit": "段落/文档"},
    # ===== Markup & code =====
    {"label_id": 13, "type": "标记-文档标记规范化", "detail": "文档标记语法错误或格式不统一，包括Markdown、HTML、XML标签语法错误，格式风格不一致，文档内路径描述错误等", "unit": "文档/代码块"},
    {"label_id": 14, "type": "标记-代码块规范", "detail": "代码块缺语言标识、命令语法错误、注释符号错误等（如“```→```python”“//注释→#注释”）", "unit": "代码块"},

    # ===== Files & paths =====
    {"label_id": 15, "type": "文件-命名规范", "detail": "文件名语义不当或扩展名错误，需要更正文件名含义或修正扩展名类型，不含文件名大小写错误", "unit": "文件"},
    {"label_id": 16, "type": "文件-路径规范", "detail": "实际文件路径错误，需要更正为正确的文件系统路径", "unit": "文件"},

    # ===== Expression =====
    {"label_id": 17, "type": "表达-语义优化", "detail": "句子表达不够准确或流畅，需要优化语义表达以提升内容的准确性和可读性", "unit": "句子/段落"},
    {"label_id": 18, "type": "表达-冗余精简", "detail": "存在语义重复或冗余表达，需要删除重复内容并精简表述", "unit": "句子/段落"},

    # ===== Structure =====
    {"label_id": 19, "type": "结构-章节顺序优化", "detail": "章节编号或逻辑顺序不合理，需要调整为正确的章节序号和逻辑结构", "unit": "章节/文档"},

    # ===== Titles =====
    {"label_id": 20, "type": "标题-语义优化", "detail": "标题语义不够准确或描述性不强，需要优化标题内容使其更好地概括章节内容", "unit": "标题"},

    # ===== Information =====
    {"label_id": 21, "type": "信息-完整性补充", "detail": "信息不完整或缺失关键内容，需要补充操作步骤、参数说明、技术背景等必要信息", "unit": "句子/段落"},

    # Catch-all for edits that fit none of the categories above.
    {"label_id": 22, "type": "其他", "detail": "不属于以上任何类别的情况", "unit": "文档/句子"}
]



# Configure root logging once at import time; every module-level logger below
# inherits this level and format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

@dataclass
class EvaluationResult:
    """Outcome of evaluating a single document-edit sample.

    Produced either by parsing one model reply (validity step) or by merging
    several classification replies via majority vote.
    """
    is_valid: bool                      # whether the edit has training value
    valid_reason: str                   # model's justification for is_valid
    label_id: Optional[str] = None      # winning label id (valid samples only)
    label_name: Optional[str] = None    # winning label name
    label_reason: Optional[str] = None  # rationale or vote summary for the label
    error: Optional[str] = None         # error marker when a step failed
    raw_output: Optional[str] = None    # last raw model output, kept for debugging
    # Aggregated information from the repeated classification voting runs.
    votes_for_winner: Optional[int] = None
    total_votes: Optional[int] = None
    vote_distribution: Optional[Dict[str, int]] = None
    category: Optional[str] = None  # auto_pass | weak_consensus | confusing | invalid

class ThreadSafeFileWriter:
    """Lock-guarded append-style writer shared by multiple threads.

    The file is opened eagerly in ``__init__`` (mode ``'w'``, UTF-8) and every
    ``write`` flushes immediately so partial results survive a crash.  The
    instance can also be used as a context manager, which guarantees the
    handle is closed even when the surrounding code raises.
    """

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.lock = threading.Lock()
        self.file_handle = open(file_path, 'w', encoding='utf-8')

    def write(self, content: str):
        """Write ``content`` and flush, holding the lock for the whole call."""
        with self.lock:
            self.file_handle.write(content)
            self.file_handle.flush()

    def close(self):
        """Close the underlying handle; safe to call more than once."""
        with self.lock:
            if not self.file_handle.closed:
                self.file_handle.close()

    def __enter__(self):
        # Context-manager support so callers can rely on `with` for cleanup.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False  # never swallow exceptions from the with-body

class AsyncDocumentQualityEvaluator:
    """Asynchronous evaluator for document-edit samples.

    Per-sample pipeline: (1) one validity-judgment call decides whether the
    edit has training value; (2) for valid edits the classification prompt is
    issued ``num_classify_runs`` times and the label is chosen by majority
    vote, yielding a category of auto_pass / weak_consensus / confusing.
    Results are streamed to JSONL/log files through thread-safe writers.
    """

    def __init__(self, api_key: str, base_url: str, model_name: str = "Qwen/Qwen3-8B", max_concurrency: int = 10):
        self.api_key = api_key
        # OpenAI-compatible chat-completions endpoint built from the base URL.
        self.base_url = base_url.rstrip("/") + "/v1/chat/completions"
        self.model_name = model_name
        self.max_concurrency = max_concurrency
        self.evaluation_results = []
        self.output_files = {}  # logical name -> ThreadSafeFileWriter
        # Number of repeated classification runs used for voting (env override).
        self.num_classify_runs = int(os.getenv('NUM_CLASSIFY_RUNS', '5'))

    def load_samples(self, file_path: str) -> List[Dict]:
        """Load JSONL samples, keeping only entries whose file_path ends in .md."""
        samples = []
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    if line.strip():
                        sample = json.loads(line)
                        # Keep Markdown-file samples only.
                        if sample.get('file_path', '').endswith('.md'):
                            samples.append(sample)
            logger.info(f"成功加载 {len(samples)} 个 .md 文件样本")
        except Exception as e:
            logger.error(f"加载样本文件失败: {e}")
            raise
        return samples

    def construct_context(self, sample: Dict) -> Tuple[str, str, str]:
        """Return (full document, removed content, added content) for a sample."""
        full_document = (
            sample.get('context_before', '') + 
            sample.get('remove_content', '') + 
            sample.get('context_after', '')
        )
        return full_document, sample.get('remove_content', ''), sample.get('add_content', '')

    def _build_tags_description(self) -> str:
        """Render DOCUMENT_EDIT_LABELS as one numbered line per label."""
        return "\n".join(f"{tag['label_id']}. {tag['type']}: {tag['detail']} ({tag['unit']})" 
                        for tag in DOCUMENT_EDIT_LABELS)

    def _write_error_log(self, error_entry: str):
        """Append to the error log, or print when files are not initialized yet."""
        if hasattr(self, 'output_files') and 'error' in self.output_files:
            self.output_files['error'].write(error_entry)
        else:
            # Fall back to the console before init_output_files() has run.
            print(error_entry)

    def create_validity_prompt(self, sample: Dict) -> str:
        """Build the step-1 prompt (validity judgment only, no classification)."""
        full_doc, original, modified = self.construct_context(sample)
        tags_description = self._build_tags_description()
        tags_count = len(DOCUMENT_EDIT_LABELS)

        prompt = f"""你是一位专业的技术文档优化模型训练师。请只完成"步骤1 有效性判断"，不要进行分类。

## 参考标准
以下是我们定义的{tags_count}个文档编辑优化标签（括号内为推荐的unit粒度），用于指导你判断什么样的修改是有意义的：
{tags_description}

关于unit（粒度）
- unit用于说明该类修改主要发生在何种粒度：句子/段落/代码块/文件/章节/标题/超链接/文档等。
- 当判断有效性时，可参考与修改相匹配的unit：局部表达更可能是句子/代码行；结构或规范类更可能是段落/章节/文件/代码块/文档。

## 有效性判断原则
基于上述标签体系，判断本次修改是否具备足够的优化教学价值：
- 如果修改属于上述标签中的某一类，说明它是「有效优化样本」
- 如果修改过于细微、不具普适性，或属于编辑者个人偏好，则视为「无效训练样本」

## 文档内容
当前修改单元ID：{sample.get('unit_id')}
### 完整上下文：
{full_doc}
### 修改详情：
- **修改前内容**：{original}
- **修改后内容**：{modified}

## 评估任务（仅执行步骤1）
### 步骤1：判断是否具备训练价值（is_valid）
基于上述标签体系，判断本次修改是否具备足够的优化教学价值：

有效修改特征（通常可在上述标签中找到对应项）：
- 改进用词表达，使语义更清晰准确
- 修复语法、拼写、标点、空格、术语格式等规范性问题
- 提升结构条理、并列表达一致性与版式统一
- 增添关键上下文、示例或澄清含义以补全信息
- 优化标题/层级或章节/文件结构
- 规范化文档标记与代码块（语言标识、标记语法、代码格式）

无效修改特征（不属于上述标签范围）：
- 内容等价，修改过于细微（如换行位置、空格数量微调）
- 编辑者习惯或风格差异，不具普适优化规律
- 仅格式或标点微调，缺乏学习价值
- 删除/替换造成信息缺失
- 属于个人写作偏好，不具通用性

## 输出要求
请严格返回如下JSON，仅包含有效性判断：
{{
"is_valid": true/false,
"valid_reason": "为什么是有效/无效"
}}
"""
        return prompt

    def create_classification_prompt(self, sample: Dict) -> str:
        """Build the step-2 prompt (label classification); called for valid samples only."""
        full_doc, original, modified = self.construct_context(sample)
        tags_description = self._build_tags_description()
        tags_count = len(DOCUMENT_EDIT_LABELS)
        max_label_id = max(tag['label_id'] for tag in DOCUMENT_EDIT_LABELS)
        prompt = f"""你是一位专业的技术文档优化模型训练师。该修改已被判定为“具备训练价值”。请只完成“步骤2 分类标签匹配”。

## 文档内容（用于参考分类）：
### 完整上下文：
{full_doc}
### 修改详情：
- **修改前内容**：{original}
- **修改后内容**：{modified}
当前修改单元ID：{sample.get('unit_id')}

## 步骤2：从下列 {tags_count} 个优化类型标签中选择最贴合的一项
{tags_description}

选择原则（结合各标签中的unit）：
- 如果多个标签均可覆盖该修改，优先选择其unit与本次修改主要发生粒度相匹配的标签（句子/段落/代码块/文件/章节/标题/超链接/文档等）。
- 若修改跨越多个粒度，请以主要改动发生的粒度为准。

## 输出要求
请严格返回如下JSON，仅包含分类信息：
{{
"label_id": "1-{max_label_id}",
"label_name": "标签名称",
"label_reason": "为何选择此标签"
}}
"""
        return prompt

    async def call_model_async(self, session: aiohttp.ClientSession, prompt: str) -> str:
        """Send one chat-completion request and return the reply text.

        Failures are appended to the error log before the exception is re-raised.
        """
        payload = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            # NOTE(review): non-zero temperature looks intentional so that the
            # repeated classification runs can disagree and produce a
            # meaningful vote distribution — confirm with the authors.
            "temperature": 0.7,
            "max_tokens": 4096
        }

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        try:
            async with session.post(self.base_url, headers=headers, json=payload) as resp:
                resp.raise_for_status()
                result = await resp.json()
                return result['choices'][0]['message']['content']
        except Exception as e:
            # Record the failed model call in the error log file.
            error_entry = f"""{'='*80}
时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
错误类型: 模型调用失败
错误信息: {str(e)}
模型名称: {self.model_name}
API地址: {self.base_url}
{'='*80}

"""
            self._write_error_log(error_entry)

            logger.error(f"模型调用失败: {e}")
            raise

    def parse_model_output(self, output: str) -> EvaluationResult:
        """Parse the model's JSON reply into an EvaluationResult.

        Parse failures are logged and converted into an invalid result
        instead of raising, so one malformed reply cannot abort the run.
        """
        try:
            # Keep only the text after any reasoning block the model may emit.
            output = output.split("</think>")[-1].strip()
            # Extract the outermost JSON object by brace positions.
            json_start = output.find('{')
            json_end = output.rfind('}') + 1

            if json_start != -1 and json_end > json_start:
                json_str = output[json_start:json_end]
                data = json.loads(json_str)

                # Build the result from whichever fields the reply provided.
                result = EvaluationResult(
                    is_valid=data.get('is_valid', False),
                    valid_reason=data.get('valid_reason', ''),
                    label_id=data.get('label_id'),
                    label_name=data.get('label_name'),
                    label_reason=data.get('label_reason'),
                    raw_output=output
                )
                return result
            else:
                # No JSON object found in the reply.
                raise ValueError("未找到有效的JSON输出")

        except Exception as e:
            # Single handler for all parse errors to avoid duplicate log entries.
            error_type = "未找到有效的JSON输出" if "未找到有效的JSON输出" in str(e) else "解析模型输出失败"

            # Dump the offending model output into the error log file.
            error_entry = f"""{'='*80}
时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
错误类型: {error_type}
错误信息: {str(e)}
原始输出:
{output}
{'='*80}

"""
            self._write_error_log(error_entry)

            logger.error(f"解析模型输出失败: {e}")
            return EvaluationResult(
                is_valid=False,
                valid_reason="解析失败",
                error=str(e),
                raw_output=output
            )

    async def evaluate_sample_async(self, session: aiohttp.ClientSession, sample: Dict) -> Tuple[Dict, EvaluationResult]:
        """Evaluate one sample: validity check, then majority-vote classification."""
        try:
            # Step 1: validity judgment first.
            validity_prompt = self.create_validity_prompt(sample)
            validity_output = await self.call_model_async(session, validity_prompt)
            validity_result = self.parse_model_output(validity_output)

            # Invalid edits are returned immediately, skipping classification.
            if not validity_result.is_valid:
                return sample, EvaluationResult(
                    is_valid=False,
                    valid_reason=validity_result.valid_reason,
                    label_id=None,
                    label_name=None,
                    label_reason=None,
                    error=validity_result.error,
                    raw_output=validity_result.raw_output
                )

            # Step 2: classify valid samples only, with repeated runs and voting.
            classification_prompt = self.create_classification_prompt(sample)
            num_runs = max(1, getattr(self, 'num_classify_runs', 5))
            vote_counter = {}
            last_output = None
            last_result = None
            for _ in range(num_runs):
                classification_output = await self.call_model_async(session, classification_prompt)
                last_output = classification_output
                classification_result = self.parse_model_output(classification_output)
                last_result = classification_result
                if not classification_result.label_id or not classification_result.label_name:
                    continue
                # Vote key combines id and name so mismatched pairs vote separately.
                key = f"{classification_result.label_id}|||{classification_result.label_name}"
                vote_counter[key] = vote_counter.get(key, 0) + 1

            # No usable votes at all: treat as "confusing".
            if not vote_counter:
                return sample, EvaluationResult(
                    is_valid=True,
                    valid_reason=validity_result.valid_reason,
                    label_id=None,
                    label_name=None,
                    label_reason="分类未产生有效结果",
                    error="no_valid_votes",
                    raw_output=last_output or "",
                    votes_for_winner=0,
                    total_votes=num_runs,
                    vote_distribution={},
                    category='confusing'
                )

            # Winning label = the key with the most votes.
            winner_key = max(vote_counter.items(), key=lambda x: x[1])[0]
            winner_votes = vote_counter[winner_key]
            winner_label_id, winner_label_name = winner_key.split('|||', 1)

            # Category: >=80% of runs -> auto_pass, strict majority -> weak
            # consensus, anything less -> confusing.
            majority_ratio = 0.8
            auto_pass_threshold = int(majority_ratio * num_runs + 1e-9)
            if auto_pass_threshold < 1:
                auto_pass_threshold = 1
            if winner_votes >= auto_pass_threshold:
                category = 'auto_pass'
            elif winner_votes >= (num_runs // 2 + 1):
                category = 'weak_consensus'
            else:
                category = 'confusing'

            merged = EvaluationResult(
                is_valid=True,
                valid_reason=validity_result.valid_reason,
                label_id=winner_label_id,
                label_name=winner_label_name,
                label_reason=f"多数票={winner_votes}/{num_runs}",
                error=None,
                raw_output=last_output or "",
                votes_for_winner=winner_votes,
                total_votes=num_runs,
                vote_distribution=vote_counter,
                category=category
            )

            return sample, merged

        except Exception as e:
            # Record the failed sample in the error log file.
            error_entry = f"""{'='*80}
时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
错误类型: 评估样本失败
错误信息: {str(e)}
样本信息:
文件路径: {sample.get('file_path')}
unit_id: {sample.get('unit_id')}
原始内容: {sample.get('remove_content')}
修改内容: {sample.get('add_content')}
{'='*80}

"""
            self._write_error_log(error_entry)

            logger.error(f"评估样本失败: {e}")
            result = EvaluationResult(
                is_valid=False,
                valid_reason="评估失败",
                error=str(e)
            )
            return sample, result

    def init_output_files(self, output_dir: str, log_dir: str = None):
        """Create timestamped output/log files and their thread-safe writers."""
        os.makedirs(output_dir, exist_ok=True)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')

        # Thread-safe writers, keyed by logical name; the category keys
        # (auto_pass / weak_consensus / confusing) double as shard selectors
        # in write_evaluation_result().
        jsonl_path = os.path.join(output_dir, f'evaluation_results_{timestamp}.jsonl')
        # Log files go to log_dir when given, otherwise alongside the results.
        log_base_dir = log_dir if log_dir else output_dir
        log_path = os.path.join(log_base_dir, f'evaluation_log_{timestamp}.txt')
        error_log_path = os.path.join(log_base_dir, f'error_{timestamp}.log')
        auto_pass_path = os.path.join(output_dir, f'auto_pass_{timestamp}.jsonl')
        weak_consensus_path = os.path.join(output_dir, f'weak_consensus_{timestamp}.jsonl')
        confusing_path = os.path.join(output_dir, f'confusing_{timestamp}.jsonl')

        self.output_files['jsonl'] = ThreadSafeFileWriter(jsonl_path)
        self.output_files['log'] = ThreadSafeFileWriter(log_path)
        self.output_files['error'] = ThreadSafeFileWriter(error_log_path)
        self.output_files['auto_pass'] = ThreadSafeFileWriter(auto_pass_path)
        self.output_files['weak_consensus'] = ThreadSafeFileWriter(weak_consensus_path)
        self.output_files['confusing'] = ThreadSafeFileWriter(confusing_path)

        self.output_dir = output_dir
        self.timestamp = timestamp

        logger.info(f"初始化输出文件：")
        logger.info(f"  - JSONL: {jsonl_path}")
        logger.info(f"  - 日志: {log_path}")
        logger.info(f"  - 错误日志: {error_log_path}")
        logger.info(f"  - 自动通过: {auto_pass_path}")
        logger.info(f"  - 弱一致: {weak_consensus_path}")
        logger.info(f"  - 易混淆: {confusing_path}")

    def close_output_files(self):
        """Close every open writer (each close is itself idempotent)."""
        for writer in self.output_files.values():
            if writer:
                writer.close()

    def write_evaluation_result(self, sample: Dict, result: EvaluationResult):
        """Thread-safely persist one evaluation result to JSONL and the log."""
        # 1. JSONL record: original sample fields plus the evaluation.
        jsonl_record = {
            # keep every field of the original sample
            **sample,
            # attach the evaluation result
            'evaluation': {
                'is_valid': result.is_valid,
                'valid_reason': result.valid_reason,
                'label_id': result.label_id,
                'label_name': result.label_name,
                'label_reason': result.label_reason,
                'error': result.error,
                'votes_for_winner': result.votes_for_winner,
                'total_votes': result.total_votes,
                'vote_distribution': result.vote_distribution,
                'category': result.category
            }
        }

        # Thread-safe JSONL write.
        jsonl_line = json.dumps(jsonl_record, ensure_ascii=False) + '\n'
        self.output_files['jsonl'].write(jsonl_line)
        # Also write into the matching per-category shard file, if any.
        category = jsonl_record.get('evaluation', {}).get('category')
        if category and category in self.output_files:
            self.output_files[category].write(jsonl_line)

        # 2. Human-readable log entry.
        log_entry = f"""{'='*80}
文件: {sample.get('file_path')}
unit_id: {sample.get('unit_id')}
{'-'*40}
原始内容:
{sample.get('remove_content')}
{'-'*40}
修改内容:
{sample.get('add_content')}
{'-'*40}
评估结果:
  是否有效: {result.is_valid}
  有效性理由: {result.valid_reason}
"""

        if result.is_valid and result.label_id:
            log_entry += f"  标签编号: {result.label_id}\n"
            log_entry += f"  标签名称: {result.label_name}\n"
            log_entry += f"  分类理由: {result.label_reason}\n"

        if result.error:
            log_entry += f"  错误: {result.error}\n"

        log_entry += f"{'='*80}\n\n"

        # Thread-safe log write.
        self.output_files['log'].write(log_entry)

    async def evaluate_all_samples_async(self, samples: List[Dict], output_dir: str, log_dir: str = None):
        """Evaluate all samples with a bounded pool of concurrent coroutines."""
        # Open all output files first.
        self.init_output_files(output_dir, log_dir)

        try:
            async with aiohttp.ClientSession() as session:
                # Task-pool pattern: at most max_concurrency coroutines in flight.
                pending_tasks = set()
                sample_iter = iter(samples)
                progress_bar = tqdm(total=len(samples), desc="评估进度")
                processed_count = 0

                # Seed the pool.
                for _ in range(min(self.max_concurrency, len(samples))):
                    try:
                        sample = next(sample_iter)
                        task = asyncio.create_task(
                            self.evaluate_sample_async(session, sample)
                        )
                        pending_tasks.add(task)
                    except StopIteration:
                        break

                # Drain the pool, refilling as tasks finish.
                while pending_tasks:
                    # Wait until at least one task completes.
                    done, pending_tasks = await asyncio.wait(
                        pending_tasks, return_when=asyncio.FIRST_COMPLETED
                    )

                    # Handle each completed task.
                    for task in done:
                        sample, result = await task

                        # Thread-safe write of this result.
                        self.write_evaluation_result(sample, result)

                        # Keep in memory for the final statistics report.
                        evaluation_record = {
                            'sample': sample,
                            'evaluation': result
                        }
                        self.evaluation_results.append(evaluation_record)

                        processed_count += 1
                        progress_bar.update(1)

                        # Debug-level progress trace.
                        if result.is_valid:
                            logger.debug(f"样本 {processed_count}: ✓ 有效修改 - {result.label_name}")
                        else:
                            logger.debug(f"样本 {processed_count}: ✗ 无效修改")

                        # Refill the pool with the next sample, if any remain.
                        try:
                            sample = next(sample_iter)
                            new_task = asyncio.create_task(
                                self.evaluate_sample_async(session, sample)
                            )
                            pending_tasks.add(new_task)
                        except StopIteration:
                            pass

                progress_bar.close()

        finally:
            # Always close the output files, even on error.
            self.close_output_files()

    def generate_final_reports(self):
        """Write the final statistics report next to the JSONL results."""
        stats_file = os.path.join(self.output_dir, f'statistics_report_{self.timestamp}.txt')
        self._generate_statistics_report(stats_file)
        logger.info("已生成最终报告")

    def _generate_statistics_report(self, file_path: str):
        """Summarize validity/error counts and the per-label distribution."""
        total = len(self.evaluation_results)
        valid_count = sum(1 for r in self.evaluation_results if r['evaluation'].is_valid)
        error_count = sum(1 for r in self.evaluation_results if r['evaluation'].error)

        # Label distribution keyed by label_id only.
        label_distribution = {}
        label_id_to_name = {}
        for record in self.evaluation_results:
            result = record['evaluation']
            if result.is_valid and result.label_id:
                label_id = str(result.label_id)
                label_distribution[label_id] = label_distribution.get(label_id, 0) + 1
                # Remember the first name seen for this id.
                if label_id not in label_id_to_name and result.label_name:
                    label_id_to_name[label_id] = result.label_name

        # Render the report.
        # NOTE(review): if total == 0 the percentage lines below raise
        # ZeroDivisionError — presumably callers always have >=1 sample; confirm.
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write("文档编辑评估统计报告\n")
            f.write(f"生成时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"{'='*60}\n\n")

            f.write("## 总体统计\n")
            f.write(f"- 总样本数: {total}\n")
            f.write(f"- 有效修改: {valid_count} ({valid_count/total*100:.1f}%)\n")
            f.write(f"- 无效修改: {total - valid_count} ({(total-valid_count)/total*100:.1f}%)\n")
            f.write(f"- 处理错误: {error_count}\n\n")

            f.write("## 标签分布(有效修改)\n")
            if label_distribution:
                for label_id, count in sorted(label_distribution.items(), key=lambda x: int(x[0])):
                    label_name = label_id_to_name.get(label_id, "")
                    percentage = count / valid_count * 100 if valid_count > 0 else 0
                    if label_name:
                        f.write(f"  {label_id}: {label_name} - {count} ({percentage:.1f}%)\n")
                    else:
                        f.write(f"  {label_id}: {count} ({percentage:.1f}%)\n")
            else:
                f.write("  无有效标签数据\n")

async def main_async():
    """Async entry point: read configuration from the environment, run the
    full evaluation pipeline, and emit the final statistics report."""
    # Configuration is sourced entirely from environment variables.
    api_key = os.getenv('API_KEY', 'EMPTY')
    base_url = os.getenv('BASE_URL', 'http://localhost:15070')
    model_name = os.getenv('MODEL_NAME', '/remote-home1/fyyuan/tool_research/models/QwQ-32B')
    input_file = os.getenv('INPUT_FILE', '../data/processed/units_prs_merged.jsonl')
    output_dir = os.getenv('OUTPUT_DIR', '../data/label_data')
    log_dir = os.getenv('LOG_DIR', '../logs/label_log')
    max_concurrency = int(os.getenv('MAX_CONCURRENCY', '40'))

    # Build the async evaluator.
    evaluator = AsyncDocumentQualityEvaluator(
        api_key=api_key,
        base_url=base_url,
        model_name=model_name,
        max_concurrency=max_concurrency,
    )

    try:
        # Load samples, evaluate them concurrently, then write the report.
        samples = evaluator.load_samples(input_file)

        logger.info("开始异步评估样本...")
        await evaluator.evaluate_all_samples_async(samples, output_dir, log_dir)

        evaluator.generate_final_reports()

        logger.info("评估完成！")
        logger.info(f"评估结果保存在: {output_dir}")
        logger.info(f"日志文件保存在: {log_dir}")

    except Exception as e:
        logger.error(f"评估过程出错: {e}")
        raise
    finally:
        # Guarantee the output writers are closed.
        evaluator.close_output_files()

def main() -> None:
    """Synchronous wrapper that drives the async entry point."""
    asyncio.run(main_async())


if __name__ == "__main__":
    main()