#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
蒸馏学生模型评估脚本
使用 CodeT5-small 基座 + LoRA 适配器的蒸馏模型评估
"""
import os
import json
import ast
import sys
import time
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel

# 添加项目根目录到路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


def load_distilled_model(adapter_path="model/distilled_student"):
    """加载蒸馏学生模型 (CodeT5-small + LoRA 适配器)
    
    Args:
        adapter_path: LoRA 适配器路径
    
    Returns:
        (model, tokenizer): 模型和分词器
    """
    print("[INFO] 加载蒸馏学生模型...")
    
    # 基座模型使用 codet5-small
    base_model_path = "./models/Salesforce_codet5-small"
    
    if not os.path.exists(base_model_path):
        print(f"[WARNING] 本地模型未找到: {base_model_path}")
        print("[INFO] 从 HuggingFace 下载...")
        base_model_path = "Salesforce/codet5-small"
    
    # 加载分词器
    tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    
    # 加载基座模型
    base_model = AutoModelForSeq2SeqLM.from_pretrained(
        base_model_path,
        device_map="auto",
        torch_dtype=torch.float16
    )
    
    # 加载 LoRA 适配器
    model = PeftModel.from_pretrained(base_model, adapter_path)
    
    print(f"[SUCCESS] 模型加载完成: {base_model_path} + {adapter_path}")
    return model, tokenizer


def generate_code(model, tokenizer, prompt: str, max_length=128):
    """Generate code for a natural-language prompt with the distilled model.

    Args:
        model: Seq2seq model (base + LoRA adapter).
        tokenizer: Tokenizer matching the base model.
        prompt: Natural-language description of the desired code.
        max_length: Maximum length (in tokens) of the generated sequence.

    Returns:
        str: The decoded generated code, special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    # Move input tensors to wherever device_map placed the model.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        # Beam search is deterministic; `temperature` only applies when
        # do_sample=True, so passing it here was a no-op that triggers a
        # transformers warning — intentionally omitted.
        outputs = model.generate(
            **inputs,
            max_length=max_length,
            num_beams=5,
            early_stopping=True,
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)


def _rough_quality_check(code_str: str, docstring: str) -> bool:
    """基于 AST 和关键词的粗略质量检查
    
    Args:
        code_str: 生成的代码
        docstring: 原始文档字符串
    
    Returns:
        bool: 是否通过检查
    """
    # AST 语法验证
    try:
        ast.parse(code_str)
    except SyntaxError:
        return False
    
    # 基本结构检查
    if "def " not in code_str and "class " not in code_str:
        return False
    
    # 关键词匹配检查
    doc_lower = docstring.lower()
    code_lower = code_str.lower()
    
    # 提取文档字符串中的关键词
    keywords = []
    for word in ["return", "calculate", "get", "set", "create", "update", "delete", "find"]:
        if word in doc_lower:
            keywords.append(word)
    
    # 至少匹配一个关键词（如果有的话）
    if keywords:
        if not any(kw in code_lower for kw in keywords):
            return False
    
    return True


def _load_test_samples(n_samples):
    """Find the first available test set and load up to n_samples records.

    Args:
        n_samples: Maximum number of JSONL records to return.

    Returns:
        list | None: Parsed records, or None when no test set file exists.
    """
    test_paths = [
        'data/processed/codesearchnet_test_text2code.jsonl',
        'data/processed/complex_test_text2code.jsonl',
    ]
    test_path = next((p for p in test_paths if os.path.exists(p)), None)
    if not test_path:
        print(f"[ERROR] 未找到测试集: {test_paths}")
        return None

    print(f"[INFO] 加载测试集: {test_path}")
    test_data = []
    with open(test_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                test_data.append(json.loads(line))

    # Slicing past the end of a shorter list is a no-op, so no length check.
    return test_data[:n_samples]


def evaluate_distilled_model(n_samples=100):
    """Evaluate the distilled model on the CodeSearchNet-Python test set.

    Generates code for each test prompt, applies a rough AST/keyword quality
    check, prints running progress, and writes a JSON summary to
    results/distilled_evaluation_detailed.json.

    Args:
        n_samples: Maximum number of samples to evaluate.

    Returns:
        dict | None: Summary metrics, or None when no test set is found.
    """
    test_data = _load_test_samples(n_samples)
    if test_data is None:
        return None

    print(f"[INFO] 评测样本数: {len(test_data)}")

    model, tokenizer = load_distilled_model()

    passed = 0
    total = 0
    total_time = 0.0

    for i, item in enumerate(test_data, 1):
        # Support both instruction/output and docstring/code record schemas.
        docstring = item.get('instruction', item.get('docstring', ''))
        # Reference solution — currently unused by the rough check; kept for
        # future exact-match / BLEU style metrics.
        reference_code = item.get('output', item.get('code', ''))

        # Time each generation for throughput statistics.
        start_time = time.time()
        generated_code = generate_code(model, tokenizer, docstring)
        elapsed = time.time() - start_time
        total_time += elapsed

        is_valid = _rough_quality_check(generated_code, docstring)
        if is_valid:
            passed += 1
        total += 1

        if i % 10 == 0:
            print(f"[PROGRESS] {i}/{len(test_data)}: {passed}/{total} 通过 ({100*passed/total:.1f}%)")

        # Show the first few samples in detail as a quick sanity check.
        if i <= 3:
            print(f"\n{'='*60}")
            print(f"Sample {i}:")
            print(f"Docstring: {docstring[:100]}...")
            print(f"Generated: {generated_code[:150]}...")
            print(f"Valid: {is_valid}")
            print(f"Time: {elapsed:.2f}s")

    # Guard against division by zero when the test file is empty.
    accuracy = 100.0 * passed / total if total > 0 else 0.0
    avg_time = total_time / total if total > 0 else 0.0

    print(f"\n{'='*60}")
    print(f"[RESULT] 蒸馏学生模型评估")
    print(f"{'='*60}")
    print(f"Passed:     {passed}/{total}")
    print(f"Accuracy:   {accuracy:.1f}%")
    print(f"Avg Time:   {avg_time:.2f}s per sample")
    print(f"Total Time: {total_time:.1f}s")
    print(f"{'='*60}")

    result = {
        "model": "distilled_student",
        "base_model": "Salesforce/codet5-small",
        "adapter": "model/distilled_student",
        "test_samples": total,
        "passed": passed,
        "accuracy": accuracy,
        "avg_time_per_sample": avg_time,
        "total_time": total_time
    }

    output_file = "results/distilled_evaluation_detailed.json"
    # Derive the directory from the output path so the two cannot drift apart.
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(result, f, indent=2, ensure_ascii=False)

    print(f"\n[SAVED] 结果保存至: {output_file}")

    return result


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="蒸馏学生模型评估")
    parser.add_argument('--n', type=int, default=100, help="评测样本数量 (默认: 100)")
    args = parser.parse_args()
    
    evaluate_distilled_model(n_samples=args.n)
