#!/usr/bin/env python
# encoding: utf-8
"""
实用的错义突变微调训练脚本
基于LucaOne嵌入进行微调
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
import os
import sys
from tqdm import tqdm
import argparse

# 添加项目路径
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))

from practical_mutation_finetuning import (
    PracticalMutationFineTuningLayer,
    PracticalMutationLoss,
    PracticalMutationDataProcessor,
    PracticalMutationConfig
)


class MutationDataset(Dataset):
    """Dataset of sequence pairs (wild-type / mutant) with precomputed LucaOne embeddings.

    Expects a CSV with at least the columns: seq_a, seq_b, seq_id_a, seq_id_b, label.
    Embeddings are loaded from ``{lucaone_embeddings_dir}/{seq_id}.npy`` and
    truncated/zero-padded to ``max_length`` rows.
    """

    def __init__(self, data_file, lucaone_embeddings_dir, max_length=2048,
                 embedding_dim=1024):
        # embedding_dim is only used for the random fallback when an embedding
        # file is missing; default 1024 matches the previous hard-coded value.
        self.data = pd.read_csv(data_file)
        self.lucaone_embeddings_dir = lucaone_embeddings_dir
        self.max_length = max_length
        self.embedding_dim = embedding_dim
        self.data_processor = PracticalMutationDataProcessor()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]

        seq_a = row['seq_a']
        seq_b = row['seq_b']
        label = float(row['label'])

        # Truncate each sequence independently.  (Previously seq_b was only
        # truncated when seq_a was over-length, so a long seq_b paired with a
        # short seq_a slipped through untruncated.)
        if len(seq_a) > self.max_length:
            seq_a = seq_a[:self.max_length]
        if len(seq_b) > self.max_length:
            seq_b = seq_b[:self.max_length]

        # Load the precomputed LucaOne embeddings for both sequences.
        lucaone_embeddings_a = self._load_lucaone_embeddings(row['seq_id_a'])
        lucaone_embeddings_b = self._load_lucaone_embeddings(row['seq_id_b'])

        # Derive mutation-aware features from the sequence pair.
        processed_data = self.data_processor.process_sequence_pair(seq_a, seq_b)

        return {
            'lucaone_embeddings_a': lucaone_embeddings_a,
            'lucaone_embeddings_b': lucaone_embeddings_b,
            'label': torch.tensor(label, dtype=torch.float32),
            'conservation_scores': processed_data['conservation_scores'],
            'aa_types': processed_data['aa_types'],
            'physicochemical_properties': processed_data['physicochemical_properties'],
            'mutation_positions': processed_data['mutation_positions']
        }

    def _load_lucaone_embeddings(self, seq_id):
        """Load a (max_length, dim) float32 embedding tensor for ``seq_id``.

        The .npy file is truncated or zero-padded along the first axis so every
        item has exactly ``max_length`` rows.  If the file is missing, a random
        tensor of shape (max_length, embedding_dim) is returned so training can
        proceed — NOTE(review): silent random fallback can mask data problems;
        consider logging a warning or raising instead.
        """
        embedding_file = os.path.join(self.lucaone_embeddings_dir, f"{seq_id}.npy")
        if os.path.exists(embedding_file):
            embeddings = np.load(embedding_file)
            if len(embeddings) > self.max_length:
                embeddings = embeddings[:self.max_length]
            elif len(embeddings) < self.max_length:
                # Zero-pad to max_length rows, keeping the embedding width.
                padding = np.zeros((self.max_length - len(embeddings), embeddings.shape[1]))
                embeddings = np.vstack([embeddings, padding])
            return torch.tensor(embeddings, dtype=torch.float32)
        else:
            # Missing embedding file: fall back to random embeddings.
            return torch.randn(self.max_length, self.embedding_dim)


def train_epoch(model, dataloader, criterion, optimizer, device):
    """Train the model for one epoch and return aggregate metrics.

    Args:
        model: module called as model(emb_a, emb_b, **feature_kwargs) returning
            a dict with 'prediction' and 'attention_weights'.
        dataloader: yields batches (dicts of tensors) from MutationDataset.
        criterion: loss callable (prediction, label, attention_weights, positions).
        optimizer: torch optimizer over model parameters.
        device: torch.device to move batch tensors onto.

    Returns:
        dict with 'loss' (mean over batches), 'accuracy', 'auc', 'f1'.
        'auc' is NaN when the epoch contains only one class (roc_auc_score
        would otherwise raise ValueError).
    """
    model.train()
    total_loss = 0
    predictions = []
    targets = []

    for batch in tqdm(dataloader, desc="Training"):
        # Move every tensor field of the batch to the target device.
        for key in batch:
            if isinstance(batch[key], torch.Tensor):
                batch[key] = batch[key].to(device)

        optimizer.zero_grad()

        # Forward pass over the embedding pair plus mutation features.
        outputs = model(
            batch['lucaone_embeddings_a'],
            batch['lucaone_embeddings_b'],
            conservation_scores=batch['conservation_scores'],
            aa_types=batch['aa_types'],
            physicochemical_properties=batch['physicochemical_properties'],
            mutation_positions=batch['mutation_positions']
        )

        # unsqueeze(1) matches the label shape to the (batch, 1) prediction.
        loss = criterion(
            outputs['prediction'],
            batch['label'].unsqueeze(1),
            outputs['attention_weights'],
            batch['mutation_positions']
        )

        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        # detach() before cpu() so no autograd graph is carried into the copy.
        predictions.extend(outputs['prediction'].detach().cpu().numpy())
        targets.extend(batch['label'].detach().cpu().numpy())

    predictions = np.array(predictions).flatten()
    targets = np.array(targets)

    # NOTE(review): thresholding at 0.5 assumes 'prediction' is already a
    # probability — confirm the model applies a sigmoid.
    binary_predictions = (predictions > 0.5).astype(int)

    accuracy = accuracy_score(targets, binary_predictions)
    # roc_auc_score raises ValueError on a single-class epoch; report NaN
    # instead of crashing the training run.
    if len(np.unique(targets)) > 1:
        auc = roc_auc_score(targets, predictions)
    else:
        auc = float('nan')
    # zero_division=0 keeps F1 well-defined when no positives are predicted.
    f1 = f1_score(targets, binary_predictions, zero_division=0)

    return {
        'loss': total_loss / len(dataloader),
        'accuracy': accuracy,
        'auc': auc,
        'f1': f1
    }


def evaluate_epoch(model, dataloader, criterion, device):
    """Evaluate the model for one epoch (no gradient updates) and return metrics.

    Mirrors train_epoch but runs under torch.no_grad() with model.eval().

    Returns:
        dict with 'loss' (mean over batches), 'accuracy', 'auc', 'f1'.
        'auc' is NaN when the evaluated split contains only one class
        (roc_auc_score would otherwise raise ValueError).
    """
    model.eval()
    total_loss = 0
    predictions = []
    targets = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            # Move every tensor field of the batch to the target device.
            for key in batch:
                if isinstance(batch[key], torch.Tensor):
                    batch[key] = batch[key].to(device)

            # Forward pass over the embedding pair plus mutation features.
            outputs = model(
                batch['lucaone_embeddings_a'],
                batch['lucaone_embeddings_b'],
                conservation_scores=batch['conservation_scores'],
                aa_types=batch['aa_types'],
                physicochemical_properties=batch['physicochemical_properties'],
                mutation_positions=batch['mutation_positions']
            )

            # unsqueeze(1) matches the label shape to the (batch, 1) prediction.
            loss = criterion(
                outputs['prediction'],
                batch['label'].unsqueeze(1),
                outputs['attention_weights'],
                batch['mutation_positions']
            )

            total_loss += loss.item()
            # Inside no_grad the tensors carry no graph, but detach().cpu()
            # keeps the code symmetric with train_epoch.
            predictions.extend(outputs['prediction'].detach().cpu().numpy())
            targets.extend(batch['label'].detach().cpu().numpy())

    predictions = np.array(predictions).flatten()
    targets = np.array(targets)

    # NOTE(review): thresholding at 0.5 assumes 'prediction' is already a
    # probability — confirm the model applies a sigmoid.
    binary_predictions = (predictions > 0.5).astype(int)

    accuracy = accuracy_score(targets, binary_predictions)
    # roc_auc_score raises ValueError on a single-class split; report NaN
    # instead of crashing the evaluation.
    if len(np.unique(targets)) > 1:
        auc = roc_auc_score(targets, predictions)
    else:
        auc = float('nan')
    # zero_division=0 keeps F1 well-defined when no positives are predicted.
    f1 = f1_score(targets, binary_predictions, zero_division=0)

    return {
        'loss': total_loss / len(dataloader),
        'accuracy': accuracy,
        'auc': auc,
        'f1': f1
    }


def main():
    """CLI entry point: train, validate, and test the mutation fine-tuning model."""
    parser = argparse.ArgumentParser(description='训练错义突变微调模型')
    parser.add_argument('--train_data', type=str, required=True, help='训练数据文件')
    parser.add_argument('--dev_data', type=str, required=True, help='验证数据文件')
    parser.add_argument('--test_data', type=str, required=True, help='测试数据文件')
    parser.add_argument('--lucaone_embeddings_dir', type=str, required=True, help='LucaOne嵌入目录')
    parser.add_argument('--output_dir', type=str, required=True, help='输出目录')
    parser.add_argument('--batch_size', type=int, default=16, help='批次大小')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='学习率')
    parser.add_argument('--num_epochs', type=int, default=50, help='训练轮数')
    parser.add_argument('--max_length', type=int, default=2048, help='最大序列长度')
    parser.add_argument('--device', type=str, default='cuda', help='设备')
    args = parser.parse_args()

    # Ensure the output directory exists before any checkpoint is written.
    os.makedirs(args.output_dir, exist_ok=True)

    # Fall back to CPU when CUDA is unavailable.
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Model, loss, optimizer, and LR schedule.
    cfg = PracticalMutationConfig()
    model = PracticalMutationFineTuningLayer(cfg).to(device)
    loss_fn = PracticalMutationLoss()
    opt = optim.AdamW(model.parameters(), lr=args.learning_rate,
                      weight_decay=cfg.weight_decay)
    lr_sched = optim.lr_scheduler.CosineAnnealingLR(opt, T_max=args.num_epochs)

    # Datasets and loaders for the three splits.
    splits = {
        name: MutationDataset(path, args.lucaone_embeddings_dir, args.max_length)
        for name, path in (('train', args.train_data),
                           ('dev', args.dev_data),
                           ('test', args.test_data))
    }
    train_loader = DataLoader(splits['train'], batch_size=args.batch_size, shuffle=True)
    dev_loader = DataLoader(splits['dev'], batch_size=args.batch_size, shuffle=False)
    test_loader = DataLoader(splits['test'], batch_size=args.batch_size, shuffle=False)

    print(f"训练集大小: {len(splits['train'])}")
    print(f"验证集大小: {len(splits['dev'])}")
    print(f"测试集大小: {len(splits['test'])}")

    def _fmt(tag, m):
        # Render one metrics line exactly as the per-epoch report expects it.
        return (f"{tag} - Loss: {m['loss']:.4f}, "
                f"Accuracy: {m['accuracy']:.4f}, "
                f"AUC: {m['auc']:.4f}, "
                f"F1: {m['f1']:.4f}")

    best_f1 = 0
    best_epoch = 0
    ckpt_path = os.path.join(args.output_dir, 'best_model.pth')

    for epoch in range(args.num_epochs):
        print(f"\nEpoch {epoch + 1}/{args.num_epochs}")
        print("-" * 50)

        train_metrics = train_epoch(model, train_loader, loss_fn, opt, device)
        print(_fmt("训练", train_metrics))

        dev_metrics = evaluate_epoch(model, dev_loader, loss_fn, device)
        print(_fmt("验证", dev_metrics))

        lr_sched.step()

        # Checkpoint whenever dev F1 improves.
        if dev_metrics['f1'] > best_f1:
            best_f1 = dev_metrics['f1']
            best_epoch = epoch + 1
            torch.save(model.state_dict(), ckpt_path)
            print(f"保存最佳模型 (F1: {best_f1:.4f})")

    print(f"\n训练完成！最佳F1分数: {best_f1:.4f} (Epoch {best_epoch})")

    # Restore the best checkpoint and report held-out test metrics.
    model.load_state_dict(torch.load(ckpt_path))
    test_metrics = evaluate_epoch(model, test_loader, loss_fn, device)

    print(f"\n测试结果:")
    print(f"Loss: {test_metrics['loss']:.4f}")
    print(f"Accuracy: {test_metrics['accuracy']:.4f}")
    print(f"AUC: {test_metrics['auc']:.4f}")
    print(f"F1: {test_metrics['f1']:.4f}")


if __name__ == "__main__":
    main()
