import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import os
from model import EnhancedWorldModel
from utils import prepare_batch, calculate_accuracy, calculate_agent_position_accuracy, calculate_agent_movement_accuracy
from visualize import plot_training_progress
from config import *
from environment import MazeEnvironment

def _batch_losses_and_metrics(env_outputs, agent_outputs, next_states,
                              agent_positions, agent_movements,
                              env_criterion, agent_criterion,
                              total_features_per_cell):
    """Compute the per-batch losses and accuracy metrics shared by the
    training and validation loops.

    Args:
        env_outputs: model logits for next environment state,
            shape assumed (batch, ENV_SIZE*ENV_SIZE, CELL_FEATURES) -- TODO confirm.
        agent_outputs: agent head output; layout assumed [dx, dy, x, y] per
            sample (movement first, absolute position last) -- TODO confirm.
        next_states: ground-truth augmented next states; the first
            CELL_FEATURES channels per cell are the one-hot cell encoding.
        agent_positions: ground-truth absolute (x, y) positions.
        agent_movements: ground-truth agent regression targets (includes dx, dy).
        env_criterion: CrossEntropyLoss for cell-state classification.
        agent_criterion: MSELoss for the agent regression head.
        total_features_per_cell: channels per cell in `next_states`.

    Returns:
        (env_loss, agent_loss, env_acc, agent_pos_acc, agent_mov_acc) where
        the losses are tensors (differentiable) and the accuracies floats.
    """
    batch_size = next_states.size(0)

    # Flatten env logits to (batch*H*W, CELL_FEATURES) for cross-entropy.
    env_outputs_flat = env_outputs.contiguous().view(-1, CELL_FEATURES)

    # Recover the one-hot cell states from the augmented state tensor: the
    # first CELL_FEATURES channels are the cell encoding, the rest are
    # auxiliary feature maps (distance/direction) that are ignored here.
    next_states_reshaped = next_states.view(
        batch_size, ENV_SIZE, ENV_SIZE, total_features_per_cell)
    next_states_cells = next_states_reshaped[:, :, :, :CELL_FEATURES]
    next_states_indices = torch.argmax(next_states_cells, dim=3).reshape(batch_size, -1)

    # Per-sample cross-entropy averaged over the batch. Each sample has
    # exactly ENV_SIZE*ENV_SIZE cells, so this equals one flat CE over all cells.
    env_loss = 0
    for i in range(batch_size):
        env_loss += env_criterion(
            env_outputs_flat[i * ENV_SIZE * ENV_SIZE:(i + 1) * ENV_SIZE * ENV_SIZE],
            next_states_indices[i]
        )
    env_loss /= batch_size

    # Agent head regresses the full target vector (relative movement plus
    # absolute position) against `agent_movements` with plain MSE.
    agent_loss = agent_criterion(agent_outputs, agent_movements)

    # Per-cell classification accuracy, averaged over the batch.
    env_acc = 0
    for i in range(batch_size):
        env_pred = torch.argmax(env_outputs[i], dim=-1).reshape(-1)
        env_acc += torch.mean((env_pred == next_states_indices[i]).float()).item()
    env_acc /= batch_size

    # Absolute-position accuracy: rounded (x, y) must match exactly.
    agent_pos_preds = torch.round(agent_outputs[:, 2:]).long()
    agent_pos_acc = torch.mean(
        torch.all(agent_pos_preds == agent_positions.long(), dim=1).float()
    ).item()

    # Relative-movement accuracy: rounded (dx, dy) must match exactly.
    agent_mov_preds = torch.round(agent_outputs[:, :2])
    agent_mov_acc = torch.mean(
        torch.all(agent_mov_preds == agent_movements[:, :2], dim=1).float()
    ).item()

    return env_loss, agent_loss, env_acc, agent_pos_acc, agent_mov_acc


def train(model, train_loader, val_loader, epochs=NUM_EPOCHS, 
          env_lr=LEARNING_RATE, agent_lr=AGENT_LEARNING_RATE, weight_decay=WEIGHT_DECAY):
    """Train the enhanced world model with separate env/agent optimizers.

    The model has two heads: an environment-state classifier (cross-entropy)
    and an agent-position regressor (MSE). Each head gets its own Adam
    optimizer and ReduceLROnPlateau scheduler; early stopping tracks a
    weighted combination of the agent position/movement accuracies.

    Args:
        model: an EnhancedWorldModel returning (env_outputs, agent_outputs).
        train_loader: DataLoader yielding
            (states, actions, next_states, agent_positions, agent_movements).
        val_loader: DataLoader with the same item layout.
        epochs: maximum number of epochs.
        env_lr: learning rate for the environment branch.
        agent_lr: learning rate for the agent branch.
        weight_decay: Adam weight decay for both optimizers.

    Returns:
        (model, metrics) where metrics is the tuple of per-epoch lists:
        (train_losses, val_losses, train_env_accuracies, val_env_accuracies,
         train_agent_pos_accuracies, val_agent_pos_accuracies,
         train_agent_mov_accuracies, val_agent_mov_accuracies).

    Side effects:
        Saves the best checkpoint to models/enhanced_world_model_best.pth
        (creating the directory if needed) and writes training_progress.png.
    """
    # Device setup
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Loss functions: classification for environment cells, regression for
    # the agent head.
    env_criterion = nn.CrossEntropyLoss()
    agent_criterion = nn.MSELoss()

    # Relative weighting of the two loss terms.
    env_weight = ENV_LOSS_WEIGHT
    agent_weight = AGENT_LOSS_WEIGHT

    # Extra per-cell input channels contributed by the optional feature maps.
    additional_features = 0
    if USE_DISTANCE_MAPS:
        additional_features += 3  # obstacle / goal / agent distance maps
    if USE_DIRECTION_MAPS:
        additional_features += 4  # goal direction (sin,cos), agent direction (sin,cos)

    total_features_per_cell = CELL_FEATURES + additional_features

    # Build optimizer parameter groups. Shared layers (graph encoder, fc1,
    # bn1) are intentionally placed in BOTH groups.
    # NOTE(review): because both optimizers step on every batch, shared
    # parameters receive two Adam updates per step (effective LR is roughly
    # env_lr + agent_lr with independent moment estimates). Confirm this is
    # the intended behavior rather than an accident.
    env_params = []
    agent_params = []
    for name, param in model.named_parameters():
        if 'graph_encoder' in name or 'fc1' in name or 'bn1' in name:
            env_params.append({'params': param, 'lr': env_lr})
            agent_params.append({'params': param, 'lr': agent_lr})
        elif 'env_' in name:
            env_params.append({'params': param, 'lr': env_lr})
        elif 'agent_' in name:
            agent_params.append({'params': param, 'lr': agent_lr})

    env_optimizer = optim.Adam(env_params, weight_decay=weight_decay)
    agent_optimizer = optim.Adam(agent_params, weight_decay=weight_decay)

    # LR schedulers: reduce when the monitored validation accuracy plateaus.
    env_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        env_optimizer, mode='max', factor=0.5, patience=15, verbose=True, min_lr=MIN_LR
    )
    agent_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        agent_optimizer, mode='max', factor=0.5, patience=10, verbose=True, min_lr=MIN_LR
    )

    # Per-epoch metric histories.
    train_losses = []
    val_losses = []
    train_env_accuracies = []
    val_env_accuracies = []
    train_agent_pos_accuracies = []
    val_agent_pos_accuracies = []
    train_agent_mov_accuracies = []
    val_agent_mov_accuracies = []

    # NOTE(review): the original code loaded 'static_mask.npy' here but never
    # used it; the dead load (and its hard file dependency) was removed.

    # Early-stopping state, keyed on the combined agent accuracy.
    best_agent_acc = 0.0
    no_improve_count = 0
    best_model_state = None

    for epoch in range(epochs):
        # ---- Training pass ----
        model.train()
        train_loss = 0
        train_env_loss = 0
        train_agent_loss = 0
        train_env_acc = 0
        train_agent_pos_acc = 0
        train_agent_mov_acc = 0
        train_samples = 0

        for states, actions, next_states, agent_positions, agent_movements in train_loader:
            states = states.to(device)
            next_states = next_states.to(device)
            agent_positions = agent_positions.to(device)
            agent_movements = agent_movements.to(device)

            # Forward pass
            env_outputs, agent_outputs = model(states)

            env_loss, agent_loss, env_acc, agent_pos_acc, agent_mov_acc = \
                _batch_losses_and_metrics(
                    env_outputs, agent_outputs, next_states,
                    agent_positions, agent_movements,
                    env_criterion, agent_criterion, total_features_per_cell)

            # Combined loss
            loss = env_weight * env_loss + agent_weight * agent_loss

            # Backward pass: both optimizers are stepped on the same graph.
            env_optimizer.zero_grad()
            agent_optimizer.zero_grad()
            loss.backward()
            env_optimizer.step()
            agent_optimizer.step()

            # Accumulate running metrics (accuracies are sample-weighted).
            batch_size = next_states.size(0)
            train_loss += loss.item()
            train_env_loss += env_loss.item()
            train_agent_loss += agent_loss.item()
            train_env_acc += env_acc * batch_size
            train_agent_pos_acc += agent_pos_acc * batch_size
            train_agent_mov_acc += agent_mov_acc * batch_size
            train_samples += batch_size

        # Average training metrics (losses per batch, accuracies per sample).
        train_loss /= len(train_loader)
        train_env_loss /= len(train_loader)
        train_agent_loss /= len(train_loader)
        train_env_acc /= train_samples
        train_agent_pos_acc /= train_samples
        train_agent_mov_acc /= train_samples

        # ---- Validation pass ----
        model.eval()
        val_loss = 0
        val_env_loss = 0
        val_agent_loss = 0
        val_env_acc = 0
        val_agent_pos_acc = 0
        val_agent_mov_acc = 0
        val_samples = 0

        with torch.no_grad():
            for states, actions, next_states, agent_positions, agent_movements in val_loader:
                states = states.to(device)
                next_states = next_states.to(device)
                agent_positions = agent_positions.to(device)
                agent_movements = agent_movements.to(device)

                env_outputs, agent_outputs = model(states)

                env_loss, agent_loss, env_acc, agent_pos_acc, agent_mov_acc = \
                    _batch_losses_and_metrics(
                        env_outputs, agent_outputs, next_states,
                        agent_positions, agent_movements,
                        env_criterion, agent_criterion, total_features_per_cell)

                loss = env_weight * env_loss + agent_weight * agent_loss

                batch_size = next_states.size(0)
                val_loss += loss.item()
                val_env_loss += env_loss.item()
                val_agent_loss += agent_loss.item()
                val_env_acc += env_acc * batch_size
                val_agent_pos_acc += agent_pos_acc * batch_size
                val_agent_mov_acc += agent_mov_acc * batch_size
                val_samples += batch_size

            val_loss /= len(val_loader)
            val_env_loss /= len(val_loader)
            val_agent_loss /= len(val_loader)
            val_env_acc /= val_samples
            val_agent_pos_acc /= val_samples
            val_agent_mov_acc /= val_samples

        # Record per-epoch metrics.
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_env_accuracies.append(train_env_acc)
        val_env_accuracies.append(val_env_acc)
        train_agent_pos_accuracies.append(train_agent_pos_acc)
        val_agent_pos_accuracies.append(val_agent_pos_acc)
        train_agent_mov_accuracies.append(train_agent_mov_acc)
        val_agent_mov_accuracies.append(val_agent_mov_acc)

        # Step schedulers on their respective validation accuracies.
        env_scheduler.step(val_env_acc)
        agent_scheduler.step(val_agent_pos_acc)  # position accuracy drives the agent LR

        # Early stopping on a weighted mix of the two agent accuracies,
        # favoring absolute position.
        combined_agent_acc = val_agent_pos_acc * 0.7 + val_agent_mov_acc * 0.3
        if combined_agent_acc > best_agent_acc:
            best_agent_acc = combined_agent_acc
            no_improve_count = 0
            # BUGFIX: state_dict().copy() was a shallow copy sharing the live
            # parameter tensors, so later training clobbered the snapshot.
            # Clone each tensor so the best weights are truly preserved.
            best_model_state = {k: v.detach().clone()
                                for k, v in model.state_dict().items()}
            # Ensure the checkpoint directory exists before saving.
            os.makedirs("models", exist_ok=True)
            torch.save(model.state_dict(), "models/enhanced_world_model_best.pth")
            print(f"保存当前最佳模型，智能体准确率: {combined_agent_acc:.4f}")
        else:
            no_improve_count += 1

        # Early-termination check.
        if no_improve_count >= PATIENCE:
            print(f"早停：{PATIENCE}轮内未改善，在轮次{epoch+1}终止训练")
            # Restore the best snapshot if one was ever taken (guards against
            # load_state_dict(None) when accuracy never improved above 0).
            if best_model_state is not None:
                model.load_state_dict(best_model_state)
            break

        # Progress report.
        print(f"Epoch {epoch+1}/{epochs}, "
              f"Train Loss: {train_loss:.4f} (Env: {train_env_loss:.4f}, Agent: {train_agent_loss:.4f}), "
              f"Train Acc: Env: {train_env_acc:.4f}, Agent Pos: {train_agent_pos_acc:.4f}, Agent Mov: {train_agent_mov_acc:.4f}, "
              f"Val Loss: {val_loss:.4f} (Env: {val_env_loss:.4f}, Agent: {val_agent_loss:.4f}), "
              f"Val Acc: Env: {val_env_acc:.4f}, Agent Pos: {val_agent_pos_acc:.4f}, Agent Mov: {val_agent_mov_acc:.4f}")

    # Plot the full training curves.
    plot_training_progress(train_losses, val_losses, 
                           train_env_accuracies, val_env_accuracies,
                           train_agent_pos_accuracies, val_agent_pos_accuracies,
                           train_agent_mov_accuracies, val_agent_mov_accuracies,
                           save_path="training_progress.png")

    return model, (train_losses, val_losses, train_env_accuracies, val_env_accuracies, 
                   train_agent_pos_accuracies, val_agent_pos_accuracies,
                   train_agent_mov_accuracies, val_agent_mov_accuracies)

def prepare_data_loaders(train_data, val_data, batch_size=BATCH_SIZE):
    """Build DataLoaders for the training and validation splits.

    Each split is a 5-tuple of
    (states, actions, next_states, agent_positions, agent_movements);
    `prepare_batch` converts it into tensors that back a TensorDataset.
    The training loader shuffles; the validation loader does not.

    Args:
        train_data: training split tuple (see above).
        val_data: validation split tuple (see above).
        batch_size: batch size for both loaders.

    Returns:
        (train_loader, val_loader)
    """
    def _make_dataset(split):
        # Unpack one split and wrap its tensors in a TensorDataset.
        states, actions, next_states, positions, movements = split
        tensors = prepare_batch(states, actions, next_states, positions, movements)
        return TensorDataset(*tensors)

    train_loader = DataLoader(_make_dataset(train_data),
                              batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(_make_dataset(val_data), batch_size=batch_size)
    return train_loader, val_loader