import torch
import torch.nn as nn
import numpy as np
import os
from model import EnhancedWorldModel
from environment import MazeEnvironment
from utils import calculate_accuracy, calculate_agent_position_accuracy, calculate_agent_movement_accuracy
from utils import create_enhanced_state_input
from visualize import visualize_grid, visualize_prediction
from config import *


def evaluate(model, test_data, num_visualizations=5):
    """Evaluate the enhanced world model on a held-out test set.

    Args:
        model: EnhancedWorldModel producing ``(env_output, agent_output)`` from
            a state tensor. ``env_output`` holds per-cell class logits;
            ``agent_output`` is a 4-vector (dx, dy, abs_x, abs_y) — assumed
            layout, inferred from the indexing below.
        test_data: tuple of (states, actions, next_states, agent_positions,
            agent_movements), all indexable and of equal length.
        num_visualizations: number of leading samples to save as prediction
            images under ``visualizations/``.

    Returns:
        Tuple ``(env_acc, static_acc, agent_pos_acc, agent_mov_acc)`` averaged
        over the test set.

    Raises:
        ValueError: if the test set is empty.
    """
    # Set up device and put the model in eval mode.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    # Unpack the test data.
    test_states, test_actions, test_next_states, test_agent_positions, test_agent_movements = test_data
    if len(test_states) == 0:
        raise ValueError("test_data is empty; nothing to evaluate")

    # Directory for prediction images.
    os.makedirs("visualizations", exist_ok=True)

    # Running sums of per-sample metrics (averaged after the loop).
    total_loss = 0
    env_loss = 0
    agent_loss = 0
    env_acc = 0
    static_acc = 0
    agent_pos_acc = 0
    agent_mov_acc = 0
    count = 0

    # Loss functions: per-cell classification for the grid, regression for the agent head.
    env_criterion = nn.CrossEntropyLoss()
    agent_criterion = nn.MSELoss()

    # Loss weights from config.
    env_weight = ENV_LOSS_WEIGHT
    agent_weight = AGENT_LOSS_WEIGHT

    # Boolean mask of cells that never change between steps; its complement
    # selects the dynamic cells. Both are loop-invariant, so computed once.
    static_mask = np.load('static_mask.npy')
    dynamic_mask = ~static_mask

    # Environment instance used to locate the agent in a grid.
    env = MazeEnvironment(seed=RANDOM_SEED)

    # Number of extra per-cell channels appended after the one-hot cell type.
    # Loop-invariant, so hoisted out of the sample loop.
    additional_features = 0
    if USE_DISTANCE_MAPS:
        additional_features += 3  # obstacle / goal / agent distance maps
    if USE_DIRECTION_MAPS:
        additional_features += 4  # goal direction (sin, cos), agent direction (sin, cos)
    total_features_per_cell = CELL_FEATURES + additional_features

    with torch.no_grad():
        for i in range(len(test_states)):
            state = test_states[i]
            action = test_actions[i]

            # Recover the categorical grids from the enhanced encoding:
            # argmax over the first CELL_FEATURES channels of each cell.
            state_grid = np.argmax(state.reshape(ENV_SIZE, ENV_SIZE, total_features_per_cell)[:, :, :CELL_FEATURES], axis=2)
            next_state_grid = np.argmax(test_next_states[i].reshape(ENV_SIZE, ENV_SIZE, total_features_per_cell)[:, :, :CELL_FEATURES], axis=2)

            true_agent_position = test_agent_positions[i]
            true_agent_movement = test_agent_movements[i]

            current_state = state_grid.copy()
            true_next_state = next_state_grid.copy()

            # Forward pass: predict the next state.
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
            env_output, agent_output = model(state_tensor)

            # Environment loss: cross-entropy over flattened per-cell logits.
            env_output_flat = env_output.contiguous().view(-1, CELL_FEATURES)

            # CrossEntropyLoss takes class indices directly; the true grid
            # already *is* the index array, so there is no need to build a
            # one-hot tensor cell by cell and argmax it back.
            true_state_indices = torch.as_tensor(
                true_next_state.flatten(), dtype=torch.long, device=device
            )
            batch_env_loss = env_criterion(env_output_flat, true_state_indices)

            # Agent loss: MSE against the true movement/position vector.
            true_agent_movement_tensor = torch.FloatTensor(true_agent_movement).unsqueeze(0).to(device)
            batch_agent_loss = agent_criterion(agent_output, true_agent_movement_tensor)

            # Weighted total loss.
            batch_loss = env_weight * batch_env_loss + agent_weight * batch_agent_loss

            # Move outputs to numpy for decoding.
            env_output_np = env_output.squeeze(0).cpu().numpy()
            agent_output_np = agent_output.squeeze(0).cpu().numpy()

            # Predicted grid from the per-cell logits.
            raw_pred_next_state = np.argmax(env_output_np, axis=-1)

            # Static cells are copied from the current state; only dynamic
            # cells take the model's prediction.
            pred_next_state = current_state.copy()
            pred_next_state[dynamic_mask] = raw_pred_next_state[dynamic_mask]

            # Agent position — two candidate decodings:
            # 1) predicted absolute position, 2) current position + relative move.

            # Method 1: absolute position head.
            pred_agent_x, pred_agent_y = int(round(agent_output_np[2])), int(round(agent_output_np[3]))

            # Method 2: current position plus relative movement head.
            current_agent_pos = env.get_agent_position(current_state)
            if current_agent_pos:
                dx, dy = int(round(agent_output_np[0])), int(round(agent_output_np[1]))
                pred_agent_x_rel = current_agent_pos[0] + dx
                pred_agent_y_rel = current_agent_pos[1] + dy

                # Keep whichever decoding is closer to the truth (Manhattan).
                true_x, true_y = int(true_agent_position[0]), int(true_agent_position[1])
                err_abs = abs(pred_agent_x - true_x) + abs(pred_agent_y - true_y)
                err_rel = abs(pred_agent_x_rel - true_x) + abs(pred_agent_y_rel - true_y)

                # Ties go to the relative decoding, matching run_test_episodes.
                if err_rel <= err_abs:
                    pred_agent_x, pred_agent_y = pred_agent_x_rel, pred_agent_y_rel

            # Clamp the predicted position into the grid.
            pred_agent_x = max(0, min(pred_agent_x, ENV_SIZE - 1))
            pred_agent_y = max(0, min(pred_agent_y, ENV_SIZE - 1))

            # Enforce a single agent cell:
            # first erase every agent cell the raw prediction produced...
            agent_positions = np.where(pred_next_state == AGENT)
            if len(agent_positions[0]) > 0:
                for ax, ay in zip(agent_positions[0], agent_positions[1]):
                    pred_next_state[ax, ay] = EMPTY

            # ...then place the agent at the predicted cell unless it is an obstacle.
            if pred_next_state[pred_agent_x, pred_agent_y] != OBSTACLE:
                pred_next_state[pred_agent_x, pred_agent_y] = AGENT

            # Overall cell-wise accuracy.
            overall_accuracy = np.mean(pred_next_state == true_next_state)

            # Accuracy restricted to static cells (vacuously 1.0 if none).
            static_accuracy = np.mean(pred_next_state[static_mask] == true_next_state[static_mask]) if np.any(
                static_mask) else 1.0

            # Exact-match accuracy of the absolute agent position.
            true_agent_x, true_agent_y = int(true_agent_position[0]), int(true_agent_position[1])
            agent_pos_accuracy = 1.0 if (pred_agent_x == true_agent_x and pred_agent_y == true_agent_y) else 0.0

            # Exact-match accuracy of the relative movement head.
            true_dx, true_dy = int(true_agent_movement[0]), int(true_agent_movement[1])
            pred_dx, pred_dy = int(round(agent_output_np[0])), int(round(agent_output_np[1]))
            agent_mov_accuracy = 1.0 if (pred_dx == true_dx and pred_dy == true_dy) else 0.0

            # Accumulate per-sample results.
            total_loss += batch_loss.item()
            env_loss += batch_env_loss.item()
            agent_loss += batch_agent_loss.item()
            env_acc += overall_accuracy
            static_acc += static_accuracy
            agent_pos_acc += agent_pos_accuracy
            agent_mov_acc += agent_mov_accuracy
            count += 1

            # Save the first few predictions as images.
            if i < num_visualizations:
                visualize_prediction(
                    true_next_state,
                    pred_next_state,
                    action,
                    save_path=f"visualizations/prediction_{i}.png"
                )

    # Average the accumulated metrics.
    total_loss /= count
    env_loss /= count
    agent_loss /= count
    env_acc /= count
    static_acc /= count
    agent_pos_acc /= count
    agent_mov_acc /= count

    print(f"Test Results: Total Loss: {total_loss:.4f} (Env: {env_loss:.4f}, Agent: {agent_loss:.4f}), "
          f"Overall Acc: {env_acc:.4f}, Static Elements: {static_acc:.4f}, "
          f"Agent Position: {agent_pos_acc:.4f}, Agent Movement: {agent_mov_acc:.4f}")

    return env_acc, static_acc, agent_pos_acc, agent_mov_acc


def run_test_episodes(model, num_episodes=5, steps_per_episode=20):
    """Roll the model forward on fresh episodes and visualize each step.

    Every step takes a random action, asks the model for a one-step
    prediction, compares it against the environment's true next state, prints
    the accuracies, and saves a side-by-side image under
    ``episode_visualizations/``.

    Args:
        model: world model returning ``(env_output, agent_output)``;
            ``agent_output`` is assumed to be (dx, dy, abs_x, abs_y).
        num_episodes: number of episodes to simulate.
        steps_per_episode: number of steps per episode.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    # Output directory for the per-step images.
    os.makedirs("episode_visualizations", exist_ok=True)

    # Boolean mask of cells that never change between steps.
    static_mask = np.load('static_mask.npy')

    for episode in range(num_episodes):
        # Same seed every episode so the maze layout is identical.
        env = MazeEnvironment(seed=RANDOM_SEED)

        # Snapshot of the untouched starting grid.
        visualize_grid(
            env.get_state(),
            title=f"Episode {episode + 1} - Initial State",
            save_path=f"episode_visualizations/episode_{episode + 1}_step_0.png"
        )

        for step in range(steps_per_episode):
            grid_before = env.get_state().copy()

            # Build the enhanced feature encoding the model expects.
            model_input = create_enhanced_state_input(grid_before)

            # Random action among UP, DOWN, LEFT, RIGHT only.
            action = np.random.randint(0, 4)

            # Ground-truth next grid for this action.
            grid_after = env.get_next_state_after_action(action).copy()

            input_tensor = torch.FloatTensor(model_input).unsqueeze(0).to(device)
            with torch.no_grad():
                # One-step prediction from the enhanced world model.
                env_logits, agent_head = model(input_tensor)

                # Decode the grid prediction from the per-cell logits.
                cell_scores = env_logits.squeeze(0).cpu().numpy()
                raw_prediction = np.argmax(cell_scores, axis=-1)

                # Keep static cells from the current grid; trust the model
                # only on the dynamic ones.
                predicted_grid = grid_before.copy()
                movable = ~static_mask
                predicted_grid[movable] = raw_prediction[movable]

                # Where the agent currently is, if it can be located.
                agent_before = env.get_agent_position(grid_before)

                head_values = agent_head.squeeze(0).cpu().numpy()

                if agent_before:
                    # Candidate 1: absolute position head.
                    cand_abs = (int(round(head_values[2])), int(round(head_values[3])))

                    # Candidate 2: current position plus relative movement head.
                    step_x, step_y = int(round(head_values[0])), int(round(head_values[1]))
                    cand_rel = (agent_before[0] + step_x, agent_before[1] + step_y)

                    # Pick whichever candidate lands closer to the truth
                    # (Manhattan distance); ties go to the relative one.
                    truth = np.where(grid_after == AGENT)
                    if len(truth[0]) > 0:
                        tx, ty = truth[0][0], truth[1][0]
                        abs_err = abs(cand_abs[0] - tx) + abs(cand_abs[1] - ty)
                        rel_err = abs(cand_rel[0] - tx) + abs(cand_rel[1] - ty)
                        chosen = cand_rel if rel_err <= abs_err else cand_abs
                    else:
                        # No agent visible in the true grid: default to the
                        # relative candidate (usually the more accurate one).
                        chosen = cand_rel
                else:
                    # Agent not locatable: fall back to the absolute head.
                    chosen = (int(round(head_values[2])), int(round(head_values[3])))
                agent_x, agent_y = chosen

                # Clamp the chosen position into the grid.
                agent_x = max(0, min(agent_x, ENV_SIZE - 1))
                agent_y = max(0, min(agent_y, ENV_SIZE - 1))

                # Enforce a single agent cell: erase every agent cell the raw
                # prediction produced...
                stale = np.where(predicted_grid == AGENT)
                if len(stale[0]) > 0:
                    for ax, ay in zip(stale[0], stale[1]):
                        predicted_grid[ax, ay] = EMPTY

                # ...then re-place the agent unless the cell is an obstacle.
                if predicted_grid[agent_x, agent_y] != OBSTACLE:
                    predicted_grid[agent_x, agent_y] = AGENT

            # Advance the real environment.
            env.step(action)

            # Exact-match accuracy of the agent cell, when both grids have one.
            truth_pos = np.where(grid_after == AGENT)
            guess_pos = np.where(predicted_grid == AGENT)

            agent_acc = 0.0
            if len(truth_pos[0]) > 0 and len(guess_pos[0]) > 0:
                same_cell = (truth_pos[0][0] == guess_pos[0][0]
                             and truth_pos[1][0] == guess_pos[1][0])
                agent_acc = 1.0 if same_cell else 0.0

            overall_acc = np.mean(predicted_grid == grid_after)
            print(f"Episode {episode + 1}, Step {step + 1}: Overall Acc: {overall_acc:.4f}, Agent Acc: {agent_acc:.4f}")

            # Side-by-side image of truth vs. prediction for this step.
            visualize_prediction(
                grid_after,
                predicted_grid,
                action,
                save_path=f"episode_visualizations/episode_{episode + 1}_step_{step + 1}.png"
            )