"""
数据集验证器

用于验证数据集有效性的核心类，包括：
1. 评估PR变更的好坏
2. 直接修改PR原始数据
3. 支持few-shot和无few-shot两种模式
"""

import csv
import json
import logging
import os
import re
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional, Tuple

from openai import OpenAI

from config import Config
from embedding_few_shot_selector import EmbeddingFewShotSelector
from prompts import create_evaluation_prompt

# request_profiler helpers have been merged into this file

# Module-level logger for this file
logger = logging.getLogger(__name__)



@dataclass
class ValidationResult:
    """Result of validating a single sample.

    FIX: fields that default to None are now annotated Optional[...];
    they were previously declared as plain ``str``/``float`` while
    defaulting to ``None``, which misled type checkers and readers.
    """
    sample_id: str                    # the sample's unit_id
    task_type: str                    # currently always 'evaluation'
    prompt_type: str                  # 'few_shot' or 'zero_shot'
    input_content: Dict[str, Any]     # raw sample that was evaluated
    model_output: str                 # raw LLM output
    processed_output: str             # output with the <think> section removed
    think_content: Optional[str] = None    # reasoning section, if present
    success: bool = True
    error: Optional[str] = None            # error message when success is False
    execution_time: Optional[float] = None  # wall-clock seconds for this sample

class DatasetValidator:
    """数据集验证器"""
    
    def __init__(self, api_key: str, base_url: str, model_name: str = None, label_data_dir: str = None):
        """
        初始化验证器
        
        Args:
            api_key: API密钥
            base_url: API基础URL
            model_name: 模型名称
        """
        self.api_key = api_key
        # 确保base_url格式正确，拼接完整的API端点
        self.base_url = base_url.rstrip("/") + "/v1/chat/completions"
        self.model_name = model_name or Config.get_model_name()
        
        # 初始化OpenAI客户端
        self.client = OpenAI(
            api_key=api_key,
            base_url=base_url.rstrip("/") + "/v1"  # OpenAI客户端只需要到/v1
        )
        # few-shot选择器（仅使用向量库）
        self.embedding_selector = EmbeddingFewShotSelector(
            embedding_dir=label_data_dir or Config.get_embedding_dir(),
            model_name=Config.get_embedding_model()
        )

        # few-shot 选择结果日志根目录（可通过环境变量覆盖），并为本次会话创建子目录
        self.session_ts = datetime.now().strftime('%Y%m%d_%H%M%S')
        base_log_dir = Config.get_few_shot_log_dir()
        self.few_shot_log_dir = os.path.join(base_log_dir, self.session_ts)
        try:
            os.makedirs(self.few_shot_log_dir, exist_ok=True)
        except Exception as e:
            logger.warning(f"创建few-shot日志目录失败: {e}")
        
        logger.info(f"初始化数据集验证器，模型: {model_name}")
        
        
        # 初始化few-shot prompt日志文件
        self.prompt_log_file = os.getenv('PROMPT_LOG_FILE', '')
        if self.prompt_log_file:
            self._init_prompt_log()
    
    def _init_prompt_log(self):
        """初始化prompt日志文件"""
        try:
            os.makedirs(os.path.dirname(self.prompt_log_file), exist_ok=True)
            if not os.path.exists(self.prompt_log_file):
                with open(self.prompt_log_file, 'w', encoding='utf-8') as f:
                    f.write("="*80 + "\n")
                    f.write("Few-shot Prompt 日志文件\n")
                    f.write(f"创建时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                    f.write("="*80 + "\n\n")
            logger.info(f"Few-shot prompt日志文件: {self.prompt_log_file}")
        except Exception as e:
            logger.warning(f"初始化prompt日志文件失败: {e}")
    
    def _log_few_shot_prompt(self, sample_id: str, task_type: str, prompt: str, examples_count: int = 0):
        """记录few-shot prompt到文件"""
        if not self.prompt_log_file:
            return
        
        try:
            with open(self.prompt_log_file, 'a', encoding='utf-8') as f:
                f.write(f"\n{'='*60}\n")
                f.write(f"时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"样本ID: {sample_id}\n")
                f.write(f"任务类型: {task_type}\n")
                f.write(f"Few-shot示例数量: {examples_count}\n")
                f.write(f"{'='*60}\n")
                f.write("Prompt内容:\n")
                f.write(prompt)
                f.write(f"\n{'='*60}\n\n")
        except Exception as e:
            logger.warning(f"记录prompt失败: {e}")

    def _log_few_shot_selection(self, sample: Dict[str, Any], label_id: int, examples: list):
        """将当前样本与选用的few-shot保存为JSON日志，便于审查回放"""
        try:
            ts = time.strftime('%Y%m%d_%H%M%S')
            unit_id = (sample.get('unit_id') or 'unknown').replace('/', '_')
            filename = f"few_shot_{unit_id}_{ts}.json"
            out_path = os.path.join(self.few_shot_log_dir, filename)

            payload = {
                "timestamp": ts,
                "unit_id": unit_id,
                "label_id": label_id,
                "sample": {
                    "pr_number": sample.get('pr_number'),
                    "file_path": sample.get('file_path'),
                    "add_content": sample.get('add_content'),
                    "remove_content": sample.get('remove_content'),
                    "context_before": sample.get('context_before'),
                    "context_after": sample.get('context_after'),
                    "pre_label": sample.get('pre_label') or {},
                },
                "few_shot_examples": examples,
            }

            with open(out_path, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)
            logger.info(f"few-shot 选择结果已保存: {out_path}")
        except Exception as e:
            logger.warning(f"保存few-shot选择日志失败: {e}")
    
    def call_model(self, prompt: str) -> str:
        """
        调用大模型
        
        Args:
            prompt: 提示词
            
        Returns:
            模型输出
        """
        import time as _t
        start_ts = _t.time()
        prompt_len = len(prompt or '')
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=2048
            )
            content = response.choices[0].message.content
            duration = _t.time() - start_ts
            return content
        except Exception as e:
            duration = _t.time() - start_ts
            logger.error(f"调用模型失败: {e}")
            raise
    
    def process_model_output(self, output: str) -> Tuple[str, str]:
        """
        处理模型输出，分离思考过程和最终结果
        
        Args:
            output: 原始模型输出
            
        Returns:
            (processed_output, think_content): 处理后的输出和思考过程
        """
        if not output:
            return "", ""
        
        # 查找</think>标签
        think_pattern = r'</think>'
        think_match = re.search(think_pattern, output)
        
        if think_match:
            # 分割思考过程和最终结果
            think_end_pos = think_match.end()
            think_content = output[:think_end_pos].strip()
            processed_output = output[think_end_pos:].strip()
            
            logger.info(f"检测到</think>标签，分离思考过程和结果")
            return processed_output, think_content
        else:
            # 没有</think>标签，整个输出作为结果
            return output.strip(), ""
    
    def evaluate_pr_content(self, sample: Dict, use_few_shot: bool = True) -> ValidationResult:
        """
        评估PR变更的好坏
        
        Args:
            sample: 样本数据
            use_few_shot: 是否使用few-shot
            
        Returns:
            验证结果
        """
        start_time = datetime.now()
        
        try:
            # 创建提示词（动态few-shot）：
            if use_few_shot:
                label_id = None
                pre_label = sample.get('pre_label') or sample.get('evaluation') or {}
                if isinstance(pre_label, dict):
                    label_id = pre_label.get('label_id')
                    if isinstance(label_id, str) and label_id.isdigit():
                        label_id = int(label_id)
                examples = []
                # 仅当存在有效label且不为-1时才尝试取示例
                if isinstance(label_id, int) and label_id != -1:
                    # 基于向量库相似检索 Top-5
                    vec_examples = self.embedding_selector.get_similar_examples_by_label(sample, label_id, top_k=5)
                    if vec_examples:
                        examples = vec_examples
                    else:
                        # 回退：从对应标签目录下 data.jsonl 随机选择
                        rand_examples = self.embedding_selector.get_random_examples_by_label(label_id, top_k=5)
                        if rand_examples:
                            examples = rand_examples

                    # 记录 few-shot 选择明细
                    try:
                        self._log_few_shot_selection(sample, label_id, examples)
                    except Exception as _e:
                        logger.warning(f"记录few-shot选择明细失败: {_e}")
                prompt = create_evaluation_prompt(sample, examples if examples else [])
                
                # 记录few-shot prompt
                if examples:
                    self._log_few_shot_prompt(
                        sample.get('unit_id', 'unknown'),
                        'evaluation',
                        prompt,
                        len(examples)
                    )
            else:
                prompt = create_evaluation_prompt(sample, [])
            
            # 调用模型
            model_output = self.call_model(prompt)
            
            # 处理模型输出
            processed_output, think_content = self.process_model_output(model_output)
            
            # 计算执行时间
            execution_time = (datetime.now() - start_time).total_seconds()
            
            return ValidationResult(
                sample_id=sample.get('unit_id', 'unknown'),
                task_type='evaluation',
                prompt_type='few_shot' if (use_few_shot and examples) else 'zero_shot',
                input_content=sample,
                model_output=model_output,
                processed_output=processed_output,
                think_content=think_content,
                success=True,
                execution_time=execution_time
            )
            
        except Exception as e:
            execution_time = (datetime.now() - start_time).total_seconds()
            return ValidationResult(
                sample_id=sample.get('unit_id', 'unknown'),
                task_type='evaluation',
                prompt_type='few_shot' if use_few_shot else 'zero_shot',
                input_content=sample,
                model_output="",
                processed_output="",
                think_content="",
                success=False,
                error=str(e),
                execution_time=execution_time
            )
    

    
    def validate_sample(self, sample: Dict) -> Dict[str, ValidationResult]:
        """
        对单个样本进行评估验证
        
        Args:
            sample: 样本数据
            
        Returns:
            包含所有验证结果的字典
        """
        results = {}
        
        # few-shot 评估
        logger.info(f"评估样本 {sample.get('unit_id', 'unknown')} (few-shot)")
        results['evaluation_few_shot'] = self.evaluate_pr_content(sample, use_few_shot=True)

        # zero-shot 评估
        logger.info(f"评估样本 {sample.get('unit_id', 'unknown')} (zero-shot)")
        results['evaluation_zero_shot'] = self.evaluate_pr_content(sample, use_few_shot=False)

        return results
