import os
import yaml
import torch
import torch.nn as nn
import numpy as np
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from tqdm import tqdm
import wandb
from sklearn.metrics import classification_report, f1_score, precision_score, recall_score
from data_preparation import prepare_dataloaders, EMOTION_MAP
from llava.model import LlavaLlamaForCausalLM
from llava.conversation import conv_templates

# Absolute path of the directory containing this file
current_dir = os.path.dirname(os.path.abspath(__file__))
# Project root directory (parent of this file's directory)
root_dir = os.path.dirname(current_dir)

class EmotionClassifier(nn.Module):
    """Emotion classification head on top of a pretrained LLaVA model.

    Combines a pooled CLIP image embedding with the final-token hidden
    state of the language model and feeds the sum through a linear
    classifier over ``num_classes`` emotion labels.
    """

    def __init__(self, llava_model, num_classes=8):
        super().__init__()
        self.llava = llava_model
        self.classifier = nn.Linear(self.llava.config.hidden_size, num_classes)

        # Load the vision tower eagerly at construction time so forward()
        # never has to lazy-load it.
        self.vision_tower = self.llava.model.vision_tower
        if not self.vision_tower.is_loaded:
            self.vision_tower.load_model()

        # Keep the vision tower on the same device as the language model.
        self.vision_tower.to(next(self.llava.parameters()).device)

    def forward(self, input_ids, pixel_values):
        """Return emotion logits of shape [batch, num_classes].

        Args:
            input_ids: Tokenized text, shape [batch, seq_len].
            pixel_values: Preprocessed images for the CLIP encoder.
        """
        # Run the CLIP encoder, requesting all hidden states so an
        # intermediate layer can be selected below.
        vision_out = self.vision_tower.vision_tower(
            pixel_values,
            output_hidden_states=True,
            return_dict=True
        )

        # Penultimate layer (config: mm_vision_select_layer = -2), dropping
        # the CLS token (config: mm_vision_select_feature = "patch").
        patch_feats = vision_out.hidden_states[-2][:, 1:]

        # Mean-pool the patch tokens -> [batch, hidden_size]
        image_embed = patch_feats.mean(dim=1)

        # Map image features into the language model's embedding space
        # when a projector is available (config: use_mm_proj = true).
        projector = self.llava.model.mm_projector
        if projector is not None:
            image_embed = projector(image_embed)

        # Text encoding: hidden state of the last sequence position.
        # NOTE(review): without an attention_mask the last position may be
        # padding on padded batches — confirm against the collate function.
        text_out = self.llava.model(
            input_ids=input_ids,
            return_dict=True
        )
        text_embed = text_out.last_hidden_state[:, -1, :]

        # Fuse modalities by addition, then classify.
        return self.classifier(text_embed + image_embed)

def validate(model, val_loader, criterion, device):
    """Evaluate ``model`` on ``val_loader`` and return classification metrics.

    Args:
        model: Classifier whose forward takes (input_ids, pixel_values) and
            returns logits of shape [batch, num_classes].
        val_loader: Iterable of batches containing 'input_ids',
            'pixel_values' and 'labels' tensors.
        criterion: Loss function applied to (logits, labels).
        device: Device batch tensors are moved to.

    Returns:
        Dict with 'loss', 'accuracy', 'f1_score', 'precision', 'recall'
        (percentages, except loss) and per-class 'class_metrics'.
    """
    model.eval()
    val_loss = 0
    val_correct = 0
    val_total = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in tqdm(val_loader, desc="Validating", leave=False):
            input_ids = batch['input_ids'].to(device)
            pixel_values = batch['pixel_values'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(input_ids, pixel_values)
            loss = criterion(outputs, labels)

            val_loss += loss.item()
            _, predicted = outputs.max(1)
            val_total += labels.size(0)
            val_correct += predicted.eq(labels).sum().item()

            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    emotion_list = list(EMOTION_MAP.keys())
    # Pin the full label set explicitly so the metrics stay aligned with
    # target_names even when some classes never appear in this split
    # (without `labels=`, classification_report raises a length-mismatch
    # error in that situation).
    label_ids = list(range(len(emotion_list)))

    # Overall (weighted) metrics. zero_division=0 matches sklearn's default
    # value for undefined metrics while suppressing the warning.
    overall_f1 = f1_score(all_labels, all_preds, labels=label_ids,
                          average='weighted', zero_division=0)
    overall_precision = precision_score(all_labels, all_preds, labels=label_ids,
                                        average='weighted', zero_division=0)
    overall_recall = recall_score(all_labels, all_preds, labels=label_ids,
                                  average='weighted', zero_division=0)

    # Per-class metrics
    class_report = classification_report(all_labels, all_preds,
                                         labels=label_ids,
                                         target_names=emotion_list,
                                         output_dict=True,
                                         zero_division=0)

    # Collect the detailed per-class metrics
    class_metrics = {}
    for emotion in emotion_list:
        class_metrics[emotion] = {
            'precision': class_report[emotion]['precision'],
            'recall': class_report[emotion]['recall'],
            'f1-score': class_report[emotion]['f1-score']
        }

    metrics = {
        'loss': val_loss / len(val_loader),
        'accuracy': 100. * val_correct / val_total,
        'f1_score': overall_f1 * 100,
        'precision': overall_precision * 100,
        'recall': overall_recall * 100,
        'class_metrics': class_metrics
    }

    return metrics

def clean_memory():
    """Free Python-level garbage and release cached CUDA memory."""
    import gc

    gc.collect()
    if not torch.cuda.is_available():
        return
    # Return cached blocks to the driver and wait for pending kernels.
    torch.cuda.empty_cache()
    torch.cuda.synchronize()

def log_message(message):
    """Print ``message`` and append it to the shared training log file.

    Args:
        message: Text to record; a newline is appended in the file.
    """
    print(message)
    log_dir = os.path.join(root_dir, 'logs')
    # Create the logs directory on first use — opening the file in a fresh
    # checkout would otherwise raise FileNotFoundError.
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, 'train.log'), 'a', encoding='utf-8') as f:
        f.write(message + '\n')

def train(config):
    """Resume fine-tuning of the LLaVA-based emotion classifier.

    Loads the pretrained LLaVA backbone, restores classifier weights from
    the best-model checkpoint, re-initializes the optimizer and LR
    scheduler, then runs the remaining epochs while logging to wandb and
    the shared log file.

    Args:
        config: Parsed YAML config dict with 'model', 'training' and
            'output' sections.

    Returns:
        The trained EmotionClassifier model.

    Raises:
        FileNotFoundError: If the resume checkpoint does not exist.
    """
    print("\n=== 开始训练函数 ===")
    # Fix random seeds for reproducibility
    torch.manual_seed(42)
    torch.cuda.manual_seed_all(42)
    
    # Select the compute device from config
    device = torch.device(config['model']['device'])
    print(f"使用设备: {device}")
    
    # Build the data loaders
    train_loader, val_loader, test_loader = prepare_dataloaders(config)
    
    print("\n数据集信息:")
    print(f"训练集样本数: {len(train_loader.dataset)}")
    print(f"验证集样本数: {len(val_loader.dataset)}")
    print(f"测试集样本数: {len(test_loader.dataset)}")
    
    # Load the pretrained model
    print("正在加载预训练模型...")
    
    # Fetch the original model configuration
    from llava.model import LlavaConfig
    llava_config = LlavaConfig.from_pretrained(config['model']['pretrained_path'])
    
    # Make sure the vision-related config fields are populated
    if not hasattr(llava_config, 'vision_tower') or llava_config.vision_tower is None:
        llava_config.vision_tower = config['model'].get('vision_tower', "openai/clip-vit-large-patch14-336")
    if not hasattr(llava_config, 'mm_vision_tower') or llava_config.mm_vision_tower is None:
        llava_config.mm_vision_tower = config['model'].get('vision_tower', "openai/clip-vit-large-patch14-336")
    
    # Load the full LLaVA model (optionally 8-bit quantized)
    print("加载LLaVA模型...")
    if config['model'].get('load_in_8bit', False):
        llava_model = LlavaLlamaForCausalLM.from_pretrained(
            config['model']['pretrained_path'],
            config=llava_config,
            load_in_8bit=True,
            device_map="auto",
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True
        )
    else:
        llava_model = LlavaLlamaForCausalLM.from_pretrained(
            config['model']['pretrained_path'],
            config=llava_config,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True
        ).to(device)
    
    print("模型加载完成，创建分类器...")
    
    # Wrap the backbone with the classification head
    model = EmotionClassifier(llava_model).to(device)
    
    # Loss function
    criterion = nn.CrossEntropyLoss()
    
    # Gradient scaler for optional mixed-precision (fp16) training
    scaler = torch.cuda.amp.GradScaler() if config['training'].get('fp16', False) else None
    
    # Checkpoint path — resume from the best model rather than an emergency checkpoint
    checkpoint_path = os.path.join(root_dir, 'models', 'best_model.pth')
    print(f"\n使用最佳模型检查点继续训练: {checkpoint_path}")
    
    # Initial bookkeeping values for the resumed run
    start_epoch = 9  # resume directly from epoch 9 (hard-coded for this run)
    global_step = None  # set after the checkpoint is loaded
    
    # Last recorded metric values, carried over from the previous run
    metrics_values = {
        'val/sadness_recall': 58.56444,
        'val/sadness_precision': 65.75892,
        'val/sadness_f1': 61.94996,
        'val/recall': 62.50635,
        'val/precision': 65.48239,
        'val/loss': 1.27308,
        'best_val_acc': 68.26,
        'best_f1': 68.20
    }
    
    # Reuse the wandb run that was active when epoch 9 finished
    run_id = "xcedjkii"  # run ID recorded at the end of epoch 9
    print(f"继续使用wandb run: {run_id}")
    
    # Initialize wandb, resuming the previous run
    wandb.init(
        project="llava-emotion",
        id=run_id,
        resume="must",
        config=config
    )
    
    if os.path.exists(checkpoint_path):
        try:
            print("\n加载检查点...")
            # NOTE(review): torch.load without weights_only=True unpickles
            # arbitrary objects — only load checkpoints from trusted sources.
            checkpoint = torch.load(checkpoint_path)
            print("\n检查点详细信息:")
            print("====================")
            print("1. 包含的键:", list(checkpoint.keys()))
            print("\n2. 当前epoch:", checkpoint['epoch'])
            print("3. 最佳验证准确率:", checkpoint['best_val_acc'])
            print("4. 最佳F1分数:", checkpoint['best_f1'])
            print("====================\n")
            
            # Restore the model weights
            print("正在加载模型状态...")
            model.load_state_dict(checkpoint['model_state_dict'])
            print("模型状态加载完成")
            
            # Re-initialize the optimizer and scheduler from scratch
            # (their saved states are deliberately not restored)
            print("\n重新初始化优化器和调度器...")
            
            # Use a smaller learning rate since the model is already well trained
            initial_lr = 5e-5
            optimizer = AdamW(
                model.parameters(),
                lr=initial_lr,
                weight_decay=0.01
            )
            
            # Learning-rate schedule over the remaining epochs
            remaining_epochs = config['training']['epochs'] - start_epoch
            total_steps = remaining_epochs * len(train_loader)
            warmup_steps = len(train_loader) // 4  # warm up for a quarter of an epoch
            
            scheduler = get_cosine_schedule_with_warmup(
                optimizer,
                num_warmup_steps=warmup_steps,
                num_training_steps=total_steps,
                num_cycles=0.5  # half a cosine cycle
            )
            
            print(f"- 初始学习率: {initial_lr:.2e}")
            print(f"- 总训练步数: {total_steps}")
            print(f"- Warmup步数: {warmup_steps}")
            
            # Reset the global step counter
            global_step = 0
            
        except Exception as e:
            print(f"加载检查点时出错: {str(e)}")
            print("错误详细信息:")
            import traceback
            traceback.print_exc()
            raise e  # abort training if the checkpoint cannot be loaded
    else:
        print(f"未找到检查点文件: {checkpoint_path}")
        print("请确保检查点文件存在")
        raise FileNotFoundError(f"检查点文件不存在: {checkpoint_path}")
        
    print(f"\n从epoch {start_epoch+1} 继续训练（总共 {config['training']['epochs']} 轮）")
    print(f"之前最佳验证准确率: {metrics_values['best_val_acc']:.2f}%, 最佳F1分数: {metrics_values['best_f1']:.2f}%")
    print(f"继续训练，从step {global_step} 开始")

    print("\n=== 开始训练循环 ===")
    print(f"训练轮次范围: {start_epoch} 到 {config['training']['epochs']}")
    
    # Main training loop
    for epoch in range(start_epoch, config['training']['epochs']):
        try:
            print(f"\n开始第 {epoch+1} 轮训练...")
            model.train()
            train_loss = 0
            train_correct = 0
            train_total = 0
            optimizer.zero_grad()
            
            progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{config['training']['epochs']}")
            for batch_idx, batch in enumerate(progress_bar):
                input_ids = batch['input_ids'].to(device)
                pixel_values = batch['pixel_values'].to(device)
                labels = batch['labels'].to(device)
                
                # Mixed-precision branch (fp16) with gradient accumulation
                if config['training'].get('fp16', False):
                    with torch.cuda.amp.autocast():
                        outputs = model(input_ids, pixel_values)
                        loss = criterion(outputs, labels)
                        loss = loss / config['training']['gradient_accumulation_steps']
                    
                    scaler.scale(loss).backward()
                    
                    if (batch_idx + 1) % config['training']['gradient_accumulation_steps'] == 0:
                        if config['training'].get('max_grad_norm', 0) > 0:
                            scaler.unscale_(optimizer)
                            torch.nn.utils.clip_grad_norm_(model.parameters(), config['training']['max_grad_norm'])
                        
                        scaler.step(optimizer)
                        scaler.update()
                        optimizer.zero_grad()
                        scheduler.step()
                        global_step += 1  # advance the global step counter
                else:
                    # Full-precision branch with gradient accumulation
                    outputs = model(input_ids, pixel_values)
                    loss = criterion(outputs, labels)
                    loss = loss / config['training']['gradient_accumulation_steps']
                    
                    loss.backward()
                    
                    if (batch_idx + 1) % config['training']['gradient_accumulation_steps'] == 0:
                        if config['training'].get('max_grad_norm', 0) > 0:
                            torch.nn.utils.clip_grad_norm_(model.parameters(), config['training']['max_grad_norm'])
                        optimizer.step()
                        optimizer.zero_grad()
                        scheduler.step()
                        global_step += 1  # advance the global step counter
                
                # Undo the accumulation scaling when tracking the raw loss
                train_loss += loss.item() * config['training']['gradient_accumulation_steps']
                _, predicted = outputs.max(1)
                train_total += labels.size(0)
                train_correct += predicted.eq(labels).sum().item()
                
                # Refresh the progress bar
                progress_bar.set_postfix({
                    'loss': f"{train_loss/(batch_idx+1):.4f}",
                    'acc': f"{100.*train_correct/train_total:.2f}%",
                    'step': global_step
                })
                
                # Log step metrics to wandb
                if (batch_idx + 1) % config['training']['logging_steps'] == 0:
                    metrics_to_log = {
                        "train/step_loss": loss.item() * config['training']['gradient_accumulation_steps'],
                        "train/step_acc": 100. * train_correct / train_total,
                        "train/learning_rate": scheduler.get_last_lr()[0],
                        "global_step": global_step,
                        "epoch": epoch  # log the integer epoch only
                    }
                    # Validation metrics are only refreshed after validation
                    wandb.log(metrics_to_log)
                
                # Save a checkpoint every 100 batches.
                # NOTE(review): this writes to checkpoint_path (best_model.pth)
                # and so overwrites the "best model" file with the latest
                # weights — consider a separate periodic-checkpoint path.
                if (batch_idx + 1) % 100 == 0:
                    checkpoint = {
                        'epoch': epoch,
                        'batch_idx': batch_idx,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'best_val_acc': metrics_values['best_val_acc'],
                        'best_f1': metrics_values['best_f1'],
                        'global_step': global_step
                    }
                    torch.save(checkpoint, checkpoint_path)
                    print(f"\n保存检查点 epoch {epoch} batch {batch_idx} step {global_step}")
            
            # Epoch-level training metrics
            train_loss = train_loss / len(train_loader)
            train_acc = 100. * train_correct / train_total
            
            # Run validation
            val_metrics = validate(model, val_loader, criterion, device)
            
            # Collect epoch metrics for logging
            metrics = {
                'train/epoch_loss': train_loss,
                'train/epoch_acc': train_acc,
                'val/loss': val_metrics['loss'],
                'val/acc': val_metrics['accuracy'],
                'val/f1': val_metrics['f1_score'],
                'val/precision': val_metrics['precision'],
                'val/recall': val_metrics['recall'],
                'epoch': epoch + 1,
                'global_step': global_step
            }
            
            # Add per-class metrics
            for emotion, emotion_metrics in val_metrics['class_metrics'].items():
                metrics.update({
                    f'val/{emotion}_precision': emotion_metrics['precision'] * 100,
                    f'val/{emotion}_recall': emotion_metrics['recall'] * 100,
                    f'val/{emotion}_f1': emotion_metrics['f1-score'] * 100
                })
            
            wandb.log(metrics)
            
            # Write to the log file
            log_message(f"\nEpoch {epoch+1}")
            log_message(f"训练损失: {train_loss:.4f}, 训练准确率: {train_acc:.2f}%")
            log_message(f"验证损失: {val_metrics['loss']:.4f}, 验证准确率: {val_metrics['accuracy']:.2f}%")
            log_message(f"验证F1分数: {val_metrics['f1_score']:.2f}%, 精确率: {val_metrics['precision']:.2f}%, 召回率: {val_metrics['recall']:.2f}%")
            log_message("\n各类别指标:")
            # NOTE(review): the loop variable `metrics` shadows the epoch
            # `metrics` dict built above (harmless because the dict is not
            # used after this point, but confusing).
            for emotion, metrics in val_metrics['class_metrics'].items():
                log_message(f"{emotion}:")
                log_message(f"  精确率: {metrics['precision']*100:.2f}%")
                log_message(f"  召回率: {metrics['recall']*100:.2f}%")
                log_message(f"  F1分数: {metrics['f1-score']*100:.2f}%")
            
            # Save the best model whenever accuracy or F1 improves
            if val_metrics['accuracy'] > metrics_values['best_val_acc'] or val_metrics['f1_score'] > metrics_values['best_f1']:
                metrics_values['best_val_acc'] = max(val_metrics['accuracy'], metrics_values['best_val_acc'])
                metrics_values['best_f1'] = max(val_metrics['f1_score'], metrics_values['best_f1'])
                torch.save({
                    'epoch': epoch + 1,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'scheduler_state_dict': scheduler.state_dict(),
                    'best_val_acc': metrics_values['best_val_acc'],
                    'best_f1': metrics_values['best_f1'],
                    'config': config
                }, checkpoint_path)
                log_message(f"\n保存最佳模型，验证准确率: {val_metrics['accuracy']:.2f}%, F1分数: {val_metrics['f1_score']:.2f}%")
            
        except Exception as e:
            log_message(f"\n训练过程中出现错误: {str(e)}")
            # Save an emergency checkpoint before re-raising
            checkpoint = {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'best_val_acc': metrics_values['best_val_acc'],
                'best_f1': metrics_values['best_f1'],
            }
            emergency_path = os.path.join(config['output']['model_save_dir'], f'emergency_checkpoint_epoch_{epoch}.pth')
            torch.save(checkpoint, emergency_path)
            log_message(f"已保存紧急检查点: {emergency_path}")
            raise e
    
    wandb.finish()
    return model

if __name__ == '__main__':
    # Load the training configuration
    config_path = os.path.join(root_dir, 'configs', 'config.yaml')
    print(f"正在加载配置文件: {config_path}")
    
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    
    # Create the model output directory
    os.makedirs(config['output']['model_save_dir'], exist_ok=True)
    
    # Start training
    model = train(config)