import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

from config import *
from model_advanced import HybridWorldModel
from utils import prepare_batch


class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) for binary targets.

    Down-weights well-classified examples so training concentrates on the
    hard-to-predict ones.

    Args:
        gamma: focusing exponent; larger values suppress easy examples more.
        alpha: scalar balancing factor applied uniformly to every element.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, inputs, targets):
        """Return the mean focal loss of `inputs` (logits) against `targets` (0/1)."""
        per_elem_ce = F.binary_cross_entropy_with_logits(
            inputs, targets, reduction='none'
        )
        # pt = exp(-CE) is the model's probability assigned to the true class.
        pt = (-per_elem_ce).exp()
        modulated = self.alpha * torch.pow(1.0 - pt, self.gamma) * per_elem_ce
        return modulated.mean()


def advanced_train(model, train_dataset, val_dataset, epochs=NUM_EPOCHS):
    """Multi-stage training strategy for the hybrid world model.

    The run is split into three stages: small batches (first quarter of
    epochs), medium batches (to the halfway point), then large batches.
    The best model by validation agent accuracy is checkpointed to disk,
    and early stopping restores those weights.

    Args:
        model: HybridWorldModel to train (moved to GPU when available).
        train_dataset / val_dataset: datasets yielding
            (states, actions, next_states, agent_positions, agent_movements).
        epochs: total number of training epochs.

    Returns:
        The trained model (best weights restored if early stopping fired).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Data loaders for the three batch-size stages.
    train_small_batch = DataLoader(train_dataset, batch_size=32, shuffle=True)
    train_medium_batch = DataLoader(train_dataset, batch_size=64, shuffle=True)
    train_large_batch = DataLoader(train_dataset, batch_size=128, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=64)

    # Loss functions.
    env_criterion = nn.CrossEntropyLoss()
    pos_criterion = nn.MSELoss()  # agent position / movement regression
    focal_criterion = FocalLoss(gamma=2.0, alpha=0.25)
    prob_criterion = nn.CrossEntropyLoss()

    optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.01)

    # FIX: the original passed steps_per_epoch=len(train_medium_batch), but the
    # small-batch stage takes more optimizer steps per epoch, so scheduler.step()
    # would eventually exceed total_steps and raise ValueError mid-training.
    # Compute the true total step count over all three stages instead.
    stage1_epochs = epochs // 4
    stage2_epochs = epochs // 2 - stage1_epochs
    stage3_epochs = epochs - epochs // 2
    total_steps = (stage1_epochs * len(train_small_batch)
                   + stage2_epochs * len(train_medium_batch)
                   + stage3_epochs * len(train_large_batch))
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=0.005,
        total_steps=max(total_steps, 1),
        pct_start=0.3,
        div_factor=10.0
    )

    # Early-stopping state.
    best_val_acc = 0
    patience = 30
    patience_counter = 0
    best_model_weights = None
    # FIX: ensure the checkpoint directory exists before the first torch.save.
    os.makedirs("models", exist_ok=True)

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        env_acc = 0
        agent_pos_acc = 0
        samples = 0

        # Pick the loader for the current stage.  NOTE(review): the manual lr
        # multipliers below only affect the first batch of each epoch —
        # OneCycleLR overwrites the lr on every scheduler.step().
        if epoch < epochs // 4:  # stage 1: small batches, boosted lr
            train_loader = train_small_batch
            for param_group in optimizer.param_groups:
                param_group['lr'] = scheduler.get_last_lr()[0] * 1.5
        elif epoch < epochs // 2:  # stage 2: medium batches, scheduled lr
            train_loader = train_medium_batch
        else:  # stage 3: large batches, damped lr
            train_loader = train_large_batch
            for param_group in optimizer.param_groups:
                param_group['lr'] = scheduler.get_last_lr()[0] * 0.8

        # Train one epoch.
        for states, actions, next_states, agent_positions, agent_movements in train_loader:
            states = states.to(device)
            next_states = next_states.to(device)
            agent_positions = agent_positions.to(device)
            agent_movements = agent_movements.to(device)

            # History of past states — simplified to None here; TODO: feed the
            # real history sequence from the dataset.
            history = None

            env_outputs, agent_outputs = model(states, history)

            # Environment prediction loss.
            batch_size = states.size(0)
            env_loss = compute_env_loss(env_outputs, next_states, env_criterion, batch_size)

            # Agent loss (combination of several loss types).
            agent_loss = compute_agent_loss(
                agent_outputs,
                agent_movements,
                agent_positions,
                pos_criterion,
                focal_criterion,
                prob_criterion,
                states,
                env_outputs
            )

            # Weighted combination: agent prediction dominates.
            loss = 0.1 * env_loss + 0.9 * agent_loss

            optimizer.zero_grad()
            loss.backward()

            # Gradient clipping for stability.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            scheduler.step()

            batch_env_acc, batch_agent_acc = compute_accuracy(
                env_outputs, next_states, agent_outputs, agent_positions
            )

            # Accumulate sample-weighted metrics.
            total_loss += loss.item() * batch_size
            env_acc += batch_env_acc * batch_size
            agent_pos_acc += batch_agent_acc * batch_size
            samples += batch_size

        # Mean training metrics for this epoch.
        train_loss = total_loss / samples
        train_env_acc = env_acc / samples
        train_agent_acc = agent_pos_acc / samples

        val_loss, val_env_acc, val_agent_acc = validate(
            model, val_loader, env_criterion, pos_criterion, focal_criterion, prob_criterion, device
        )

        print(f"Epoch {epoch + 1}/{epochs}, "
              f"Train Loss: {train_loss:.4f}, "
              f"Train Acc: Env: {train_env_acc:.4f}, Agent: {train_agent_acc:.4f}, "
              f"Val Loss: {val_loss:.4f}, "
              f"Val Acc: Env: {val_env_acc:.4f}, Agent: {val_agent_acc:.4f}")

        # Early-stopping check on validation agent accuracy.
        if val_agent_acc > best_val_acc:
            best_val_acc = val_agent_acc
            patience_counter = 0
            # FIX: state_dict().copy() is a shallow copy — the tensors keep
            # being mutated by training, so the restored "best" weights would
            # actually be the latest ones.  Clone every tensor instead.
            best_model_weights = {k: v.detach().clone()
                                  for k, v in model.state_dict().items()}
            torch.save(best_model_weights, "models/hybrid_world_model_best.pth")
            print(f"保存新的最佳模型，智能体准确率: {val_agent_acc:.4f}")
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f"早停: {patience}轮没有改善，在第{epoch + 1}轮停止训练")
                model.load_state_dict(best_model_weights)
                break

    return model


def compute_env_loss(env_outputs, next_states, criterion, batch_size):
    """Cross-entropy loss between predicted cell logits and ground-truth cells.

    Args:
        env_outputs: predicted per-cell class logits, reshapeable to
            [batch, ENV_SIZE*ENV_SIZE, CELL_FEATURES].
        next_states: flat enhanced next-state encoding,
            [batch, ENV_SIZE*ENV_SIZE*total_features].
        criterion: a CrossEntropyLoss-style callable.
        batch_size: number of samples in the batch.

    Returns:
        Scalar loss tensor over every grid cell in the batch (no Python loop).
    """
    n_cells = ENV_SIZE * ENV_SIZE
    # Number of features each flattened cell carries in the enhanced encoding.
    feats_per_cell = next_states.size(1) // n_cells

    grid = next_states.view(batch_size, ENV_SIZE, ENV_SIZE, feats_per_cell)
    # Only the leading CELL_FEATURES channels are the basic one-hot cell state;
    # its argmax is the target class index for each cell.
    target_idx = grid[..., :CELL_FEATURES].argmax(dim=-1).reshape(batch_size * n_cells)

    logits = env_outputs.reshape(batch_size * n_cells, CELL_FEATURES)
    return criterion(logits, target_idx)


def compute_agent_loss(agent_outputs, agent_movements, agent_positions,
                       pos_criterion, focal_criterion, prob_criterion,
                       states, env_outputs):
    """Combined agent loss: relative movement + absolute position + focal term.

    Args:
        agent_outputs: [batch, 4] — columns 0-1 predict (dx, dy), 2-3 predict (x, y).
        agent_movements: ground-truth movement; only the first two columns are used.
        agent_positions: ground-truth absolute (x, y) positions.
        pos_criterion: regression criterion (MSE) for both position terms.
        focal_criterion: criterion applied to positions normalized into [0, 1].
        prob_criterion, states, env_outputs: accepted for interface
            compatibility but currently unused.

    NOTE(review): the focal term receives normalized positions, not logits —
    FocalLoss applies a sigmoid internally, which looks questionable; confirm
    this is intended.
    """
    # Relative movement (dx, dy) regression.
    move_loss = pos_criterion(agent_outputs[:, :2], agent_movements[:, :2])

    # Absolute position (x, y) regression.
    predicted_pos = agent_outputs[:, 2:]
    position_loss = pos_criterion(predicted_pos, agent_positions)

    # Focal term on positions rescaled into the unit range, to emphasize
    # hard-to-predict locations.
    scale = ENV_SIZE - 1
    hard_sample_loss = focal_criterion(predicted_pos / scale, agent_positions / scale)

    # Weighted blend — absolute position prediction dominates.
    return 0.3 * move_loss + 0.5 * position_loss + 0.2 * hard_sample_loss


def compute_accuracy(env_outputs, next_states, agent_outputs, agent_positions):
    """Compute environment cell accuracy and exact agent-position accuracy.

    Args:
        env_outputs: predicted per-cell class logits,
            [batch, ENV_SIZE, ENV_SIZE, CELL_FEATURES].
        next_states: flat enhanced next-state encoding.
        agent_outputs: [batch, 4]; columns 2-3 are the predicted (x, y).
        agent_positions: ground-truth (x, y) positions.

    Returns:
        (env_acc, agent_acc) as Python floats.
    """
    batch_size = env_outputs.size(0)

    # Environment accuracy: fraction of grid cells whose predicted class
    # matches the ground truth.  FIX: the original looped over the batch in
    # Python; since every sample has the same cell count, the mean of
    # per-sample means equals one vectorized global mean.
    true_basic = extract_basic_features(next_states, batch_size)
    env_pred = torch.argmax(env_outputs, dim=-1)
    env_true = torch.argmax(true_basic, dim=-1)
    env_acc = torch.mean((env_pred == env_true).float()).item()

    # Agent accuracy: a prediction counts only if BOTH rounded coordinates
    # match the true position exactly.
    agent_pos_pred = torch.round(agent_outputs[:, 2:]).long()
    agent_pos_true = agent_positions.long()
    agent_acc = torch.mean(
        torch.all(agent_pos_pred == agent_pos_true, dim=1).float()
    ).item()

    return env_acc, agent_acc


def validate(model, val_loader, env_criterion, pos_criterion, focal_criterion, prob_criterion, device):
    """Evaluate the model on the validation set.

    Runs one pass over `val_loader` in eval mode with gradients disabled and
    returns (mean loss, mean env accuracy, mean agent accuracy), each averaged
    over all validation samples.
    """
    model.eval()
    running_loss = 0.0
    running_env_acc = 0.0
    running_agent_acc = 0.0
    n_seen = 0

    with torch.no_grad():
        for states, actions, next_states, agent_positions, agent_movements in val_loader:
            states = states.to(device)
            next_states = next_states.to(device)
            agent_positions = agent_positions.to(device)
            agent_movements = agent_movements.to(device)

            # History sequence is not wired up yet (mirrors the training loop).
            history = None
            env_outputs, agent_outputs = model(states, history)

            n = states.size(0)
            env_loss = compute_env_loss(env_outputs, next_states, env_criterion, n)
            agent_loss = compute_agent_loss(
                agent_outputs,
                agent_movements,
                agent_positions,
                pos_criterion,
                focal_criterion,
                prob_criterion,
                states,
                env_outputs,
            )
            # Same 10/90 weighting as the training loss.
            batch_loss = 0.1 * env_loss + 0.9 * agent_loss

            env_acc, agent_acc = compute_accuracy(
                env_outputs, next_states, agent_outputs, agent_positions
            )

            # Sample-weighted accumulation.
            running_loss += batch_loss.item() * n
            running_env_acc += env_acc * n
            running_agent_acc += agent_acc * n
            n_seen += n

    return (running_loss / n_seen,
            running_env_acc / n_seen,
            running_agent_acc / n_seen)


def extract_basic_features(next_states, batch_size):
    """Slice the basic per-cell features out of the enhanced state encoding.

    Args:
        next_states: flat tensor [batch, ENV_SIZE*ENV_SIZE*total_features].
        batch_size: number of samples in the batch.

    Returns:
        Tensor of shape [batch, ENV_SIZE, ENV_SIZE, CELL_FEATURES].
    """
    per_cell = next_states.size(1) // (ENV_SIZE * ENV_SIZE)
    grid = next_states.view(batch_size, ENV_SIZE, ENV_SIZE, per_cell)
    # Only the leading CELL_FEATURES channels encode the basic cell state.
    return grid[..., :CELL_FEATURES]
