#!/usr/bin/env python3
"""
Embedding模型QLoRA微调效果评估脚本
评估微调后的embedding模型在检索任务上的性能
"""

import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import argparse
from typing import List, Dict, Tuple, Optional
import torch
from transformers import AutoModel, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
from tqdm import tqdm
import logging

# 设置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class EmbeddingModelEvaluator:
    """Evaluate retrieval quality (Recall@K / MRR@K) of a base embedding
    model and, optionally, its QLoRA fine-tuned variant on a JSONL test set
    of query / positive-document pairs."""

    def __init__(self,
                 base_model_name: str = "Qwen/Qwen3-Embedding-0.6B",
                 finetuned_model_path: Optional[str] = None,
                 device: str = "cuda" if torch.cuda.is_available() else "cpu"):
        """
        Initialize the evaluator.

        Args:
            base_model_name: Hugging Face model id of the base model.
            finetuned_model_path: Local path of the fine-tuned model; when
                None, only the base model is evaluated.
            device: Torch device string used for inference.
        """
        self.device = device
        self.base_model_name = base_model_name
        self.finetuned_model_path = finetuned_model_path

        # The fine-tuned model shares the base model's tokenizer.
        self.tokenizer = AutoTokenizer.from_pretrained(base_model_name)

        # Load the base model in eval mode (no dropout, no grads needed).
        logger.info(f"加载基础模型: {base_model_name}")
        self.base_model = AutoModel.from_pretrained(base_model_name).to(device)
        self.base_model.eval()

        # Optionally load the fine-tuned model.
        self.finetuned_model = None
        if finetuned_model_path:
            logger.info(f"加载微调模型: {finetuned_model_path}")
            self.finetuned_model = AutoModel.from_pretrained(finetuned_model_path).to(device)
            self.finetuned_model.eval()

    def encode_texts(self, texts: List[str], model: torch.nn.Module, batch_size: int = 32) -> np.ndarray:
        """
        Encode texts into dense vectors.

        Args:
            texts: Input strings to embed.
            model: Model used for encoding.
            batch_size: Number of texts per forward pass.

        Returns:
            (len(texts), hidden_dim) float matrix of embeddings.
        """
        embeddings = []

        with torch.no_grad():
            for i in tqdm(range(0, len(texts), batch_size), desc="编码文本"):
                batch_texts = texts[i:i + batch_size]

                inputs = self.tokenizer(
                    batch_texts,
                    padding=True,
                    truncation=True,
                    max_length=512,
                    return_tensors="pt"
                ).to(self.device)

                outputs = model(**inputs)
                if hasattr(outputs, 'last_hidden_state'):
                    # Masked mean pooling: padding tokens are zeroed out so
                    # they do not contribute to the sentence vector.
                    attention_mask = inputs['attention_mask']
                    token_embeddings = outputs.last_hidden_state
                    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
                    batch_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
                else:
                    # Fallback for models that only expose a pooled output.
                    batch_embeddings = outputs.pooler_output

                embeddings.append(batch_embeddings.cpu().numpy())

        return np.vstack(embeddings)

    def evaluate_retrieval(self, test_data_path: str, k_values: Optional[List[int]] = None) -> Dict:
        """
        Evaluate retrieval performance on a JSONL test file.

        Each line must be a JSON object with 'query' and 'positive_doc',
        and may carry 'negative_docs' (a list of distractor documents).

        Args:
            test_data_path: Path to the JSONL test data.
            k_values: Cut-offs to evaluate; defaults to [1, 5, 10, 20].
                (None default avoids the mutable-default-argument pitfall.)

        Returns:
            {'base_model': metrics, 'finetuned_model': metrics}
            ('finetuned_model' present only when that model was loaded).
        """
        if k_values is None:
            k_values = [1, 5, 10, 20]

        logger.info(f"加载测试数据: {test_data_path}")

        queries = []
        positive_docs = []
        all_docs = []
        seen_docs = set()  # O(1) dedup instead of O(n) `in list` scans

        with open(test_data_path, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f):
                try:
                    data = json.loads(line.strip())
                    query = data['query']
                    pos_doc = data['positive_doc']
                except (json.JSONDecodeError, KeyError):
                    # Also skip lines missing required keys instead of crashing.
                    logger.warning(f"第 {line_num} 行JSON解析错误，跳过")
                    continue

                queries.append(query)
                positive_docs.append(pos_doc)

                # Build the document pool (positives + negatives, deduplicated).
                if pos_doc not in seen_docs:
                    seen_docs.add(pos_doc)
                    all_docs.append(pos_doc)

                for neg_doc in data.get('negative_docs', []):
                    if neg_doc not in seen_docs:
                        seen_docs.add(neg_doc)
                        all_docs.append(neg_doc)

        logger.info(f"加载了 {len(queries)} 个查询和 {len(all_docs)} 个文档")

        results = {}

        # Evaluate the base model.
        logger.info("评估基础模型...")
        query_embeddings_base = self.encode_texts(queries, self.base_model)
        doc_embeddings_base = self.encode_texts(all_docs, self.base_model)
        results['base_model'] = self._calculate_retrieval_metrics(
            query_embeddings_base, doc_embeddings_base, queries, positive_docs, all_docs, k_values
        )

        # Evaluate the fine-tuned model, if present.
        if self.finetuned_model:
            logger.info("评估微调模型...")
            query_embeddings_ft = self.encode_texts(queries, self.finetuned_model)
            doc_embeddings_ft = self.encode_texts(all_docs, self.finetuned_model)
            results['finetuned_model'] = self._calculate_retrieval_metrics(
                query_embeddings_ft, doc_embeddings_ft, queries, positive_docs, all_docs, k_values
            )

        return results

    @staticmethod
    def _cosine_similarity(a: np.ndarray, b: np.ndarray) -> np.ndarray:
        """Pairwise cosine similarity between row vectors of a and b
        (numpy-only, numerically equivalent to sklearn's cosine_similarity;
        zero vectors are guarded against division by zero)."""
        a_unit = a / np.clip(np.linalg.norm(a, axis=1, keepdims=True), 1e-12, None)
        b_unit = b / np.clip(np.linalg.norm(b, axis=1, keepdims=True), 1e-12, None)
        return a_unit @ b_unit.T

    def _calculate_retrieval_metrics(self,
                                   query_embeddings: np.ndarray,
                                   doc_embeddings: np.ndarray,
                                   queries: List[str],
                                   positive_docs: List[str],
                                   all_docs: List[str],
                                   k_values: List[int]) -> Dict:
        """
        Compute Recall@K and MRR@K for every K in k_values.

        Returns:
            Dict mapping 'recall_at_K' / 'mrr_at_K' to plain Python floats.
            Plain floats (not np.float64) keep the result JSON-serializable;
            np.float64 values make json.dump raise TypeError.
        """
        similarity_matrix = self._cosine_similarity(query_embeddings, doc_embeddings)

        # Map each document string to its (first) index in the pool, then
        # rank documents ONCE per query — instead of once per query per K —
        # and record the 1-based rank of each query's positive document.
        doc_index = {}
        for idx, doc in enumerate(all_docs):
            if doc not in doc_index:
                doc_index[doc] = idx

        pos_ranks = []
        for i, pos_doc in enumerate(positive_docs):
            pos_idx = doc_index.get(pos_doc)
            if pos_idx is None:
                # Positive document absent from the pool: never retrieved.
                pos_ranks.append(float('inf'))
                continue
            # Same tie-breaking as np.argsort(scores)[::-1] in the original.
            ranking = np.argsort(similarity_matrix[i])[::-1]
            pos_ranks.append(int(np.where(ranking == pos_idx)[0][0]) + 1)

        metrics = {}
        for k in k_values:
            if pos_ranks:
                metrics[f'recall_at_{k}'] = float(np.mean([1.0 if r <= k else 0.0 for r in pos_ranks]))
                metrics[f'mrr_at_{k}'] = float(np.mean([1.0 / r if r <= k else 0.0 for r in pos_ranks]))
            else:
                # No queries: define metrics as 0 rather than np.mean([]) -> NaN.
                metrics[f'recall_at_{k}'] = 0.0
                metrics[f'mrr_at_{k}'] = 0.0

        return metrics

    def plot_comparison(self, results: Dict, output_path: str = "assets/figures/EmbeddingModel_QLoRA_Comparison.svg"):
        """
        Plot base-vs-fine-tuned bar charts for Recall@K and MRR@K.

        Metric names are derived from the results dict, so any k_values used
        during evaluation are supported (the previous hard-coded list raised
        KeyError for non-default K). Tick labels are rendered as 'R@K' /
        'MRR@K' (the old double .replace produced 'R@at@K').
        """
        if 'finetuned_model' not in results:
            logger.warning("没有微调模型结果，无法绘制对比图")
            return

        def sorted_metrics(prefix: str) -> List[str]:
            # Sort numerically by the K suffix so the x axis is ordered.
            names = [m for m in results['base_model'] if m.startswith(prefix)]
            return sorted(names, key=lambda m: int(m.rsplit('_', 1)[1]))

        recall_metrics = sorted_metrics('recall_at_')
        mrr_metrics = sorted_metrics('mrr_at_')

        plt.style.use('seaborn-v0_8')
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
        width = 0.35

        # Recall comparison.
        recall_base = [results['base_model'][m] for m in recall_metrics]
        recall_ft = [results['finetuned_model'][m] for m in recall_metrics]
        x = np.arange(len(recall_metrics))

        ax1.bar(x - width/2, recall_base, width, label='Base Model', alpha=0.8, color='skyblue')
        ax1.bar(x + width/2, recall_ft, width, label='QLoRA Fine-tuned', alpha=0.8, color='lightgreen')
        ax1.set_xlabel('Metrics')
        ax1.set_ylabel('Score')
        ax1.set_title('Recall@K Comparison')
        ax1.set_xticks(x)
        ax1.set_xticklabels([m.replace('recall_at_', 'R@') for m in recall_metrics])
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # MRR comparison.
        mrr_base = [results['base_model'][m] for m in mrr_metrics]
        mrr_ft = [results['finetuned_model'][m] for m in mrr_metrics]
        x = np.arange(len(mrr_metrics))

        ax2.bar(x - width/2, mrr_base, width, label='Base Model', alpha=0.8, color='skyblue')
        ax2.bar(x + width/2, mrr_ft, width, label='QLoRA Fine-tuned', alpha=0.8, color='lightgreen')
        ax2.set_xlabel('Metrics')
        ax2.set_ylabel('Score')
        ax2.set_title('MRR@K Comparison')
        ax2.set_xticks(x)
        ax2.set_xticklabels([m.replace('mrr_at_', 'MRR@') for m in mrr_metrics])
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        plt.tight_layout()

        # Persist the figure, creating parent directories as needed.
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(output_path, dpi=300, bbox_inches='tight', format='svg')
        logger.info(f"对比图表已保存到: {output_path}")
        plt.show()

    def save_results(self, results: Dict, output_path: str = "evaluation/results/embedding_qlora_results.json"):
        """Persist evaluation results as pretty-printed UTF-8 JSON, creating
        parent directories as needed. Assumes metric values are plain floats
        (guaranteed by _calculate_retrieval_metrics)."""
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)
        logger.info(f"评估结果已保存到: {output_path}")

def main():
    """CLI entry point: parse arguments, run the retrieval evaluation,
    print the metrics, draw the comparison chart, and persist results."""
    cli = argparse.ArgumentParser(description="评估Embedding模型QLoRA微调效果")
    cli.add_argument("--test_data", type=str, required=True, help="测试数据路径")
    cli.add_argument("--base_model", type=str, default="Qwen/Qwen3-Embedding-0.6B", help="基础模型名称")
    cli.add_argument("--finetuned_model", type=str, help="微调后模型路径")
    cli.add_argument("--output_dir", type=str, default="evaluation/results", help="输出目录")
    cli.add_argument("--k_values", nargs='+', type=int, default=[1, 5, 10, 20], help="评估的K值")
    opts = cli.parse_args()

    # Build the evaluator (loads the base and, optionally, fine-tuned model).
    evaluator = EmbeddingModelEvaluator(
        base_model_name=opts.base_model,
        finetuned_model_path=opts.finetuned_model,
    )

    # Run the retrieval evaluation.
    results = evaluator.evaluate_retrieval(opts.test_data, opts.k_values)

    # Report metrics per model on stdout.
    print("\n=== 评估结果 ===")
    for model_name in results:
        print(f"\n{model_name.upper()}:")
        for metric, score in results[model_name].items():
            print(f"  {metric}: {score:.4f}")

    # Comparison chart (no-op when no fine-tuned model was evaluated).
    evaluator.plot_comparison(results)

    # Persist results as JSON under the requested output directory.
    evaluator.save_results(results, f"{opts.output_dir}/embedding_qlora_results.json")

# Standard entry guard: run the CLI only when executed as a script.
if __name__ == "__main__":
    main()
