import os
import time
from datetime import datetime

import numpy as np
import torch
from tqdm import tqdm

from agents.dqn_agent import DQNAgent
from game_engine import MultiProcessVectorizedEnv, WatermelonEnv


def preprocess_observation(observation):
    """Preprocess an observation: scale pixel values to [0, 1] and reorder
    the axes from (H, W, C) to (C, H, W)."""
    scaled = np.asarray(observation) / 255.0
    return np.transpose(scaled, (2, 0, 1))


def train_dqn(
    episodes=1000,
    render_every=100,
    save_path="models",
    render_mode="rgb_array",
    frame_skip=1,
    num_envs=4,
):
    """Train a DQN agent, using multi-process parallel environments to speed
    up data collection.

    Args:
        episodes: total number of episodes to complete before stopping.
        render_every: kept for interface compatibility (currently unused here).
        save_path: directory where model checkpoints are written.
        render_mode: render mode passed to each WatermelonEnv.
        frame_skip: frame-skip factor passed to each WatermelonEnv.
        num_envs: number of parallel environment worker processes.

    Returns:
        (agent, all_rewards): the trained agent and a list with the total
        reward of every completed episode, in completion order.
    """
    # Make sure the checkpoint directory exists.
    os.makedirs(save_path, exist_ok=True)

    # Environment factory, used by the vectorized env and to probe spaces.
    def make_env():
        return WatermelonEnv(render_mode=render_mode, frame_skip=frame_skip)

    # Create the multi-process vectorized environment.
    vec_env = MultiProcessVectorizedEnv(make_env, num_envs=num_envs)

    # Derive state/action space info. The observation is used as-is (no
    # preprocessing); state_shape assumes 12 stacked channels — presumably a
    # frame stack produced by the env, TODO confirm against WatermelonEnv.
    observations, _ = vec_env.reset()
    state_shape = (12, observations[0].shape[0], observations[0].shape[1])  # (C, H, W)
    probe_env = make_env()
    n_actions = probe_env.action_space.n  # action-space size from a single env
    # Close the probe env so it does not leak resources (window/processes).
    if hasattr(probe_env, "close"):
        probe_env.close()

    # Create the agent.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    agent = DQNAgent(state_shape, n_actions, device)

    # Scale the batch size with the number of parallel envs.
    agent.batch_size = max(32, 8 * num_envs)

    # Training schedule parameters.
    update_target_every = 1000  # steps between target-network syncs
    log_interval = 100  # episodes between console logs
    save_interval = 50  # episodes between checkpoints

    # Training metrics.
    all_rewards = []
    episode_rewards = [0] * num_envs
    episode_lengths = [0] * num_envs
    completed_episodes = 0

    # Begin training.
    total_steps = 0
    start_time = time.time()

    # Initial states.
    states = observations

    # tqdm progress bar over completed episodes.
    pbar = tqdm(total=episodes, desc="Training Progress")

    # Keep collecting until enough episodes have completed.
    while completed_episodes < episodes:
        # Pick one action per environment.
        actions = [agent.select_action(state) for state in states]

        # Step all environments in parallel.
        new_observations, rewards, terminateds, truncateds, infos = vec_env.step(
            actions
        )

        # Process each environment's transition.
        next_states = []
        for i in range(num_envs):
            # Accumulate reward and episode length.
            episode_rewards[i] += rewards[i]
            episode_lengths[i] += 1

            # Next state is None on termination so the agent can mask it.
            next_state = new_observations[i] if not terminateds[i] else None
            next_states.append(next_state)

            # Store the transition in the replay buffer.
            agent.memory.push(
                states[i], actions[i], next_state, rewards[i], terminateds[i]
            )

            # Handle episode end.
            if terminateds[i] or truncateds[i]:
                # Record the finished episode.
                all_rewards.append(episode_rewards[i])
                completed_episodes += 1
                pbar.update(1)

                # Reset this environment immediately, unless training is done.
                if completed_episodes < episodes:
                    reset_obs, _ = vec_env.reset_one(i)  # reset a single env
                    new_observations[i] = reset_obs
                    next_states[i] = reset_obs  # continue from the fresh state
                episode_rewards[i] = 0
                episode_lengths[i] = 0

                # Periodic logging. Hoisted out of the reset branch above so
                # a log that falls on the final episode is not skipped.
                if completed_episodes % log_interval == 0:
                    avg_reward = np.mean(all_rewards[-num_envs * log_interval :])
                    elapsed_time = time.time() - start_time
                    memory_usage = len(agent.memory) / agent.memory.capacity * 100

                    print(
                        f"Episode {completed_episodes}/{episodes} - "
                        f"Avg Reward: {avg_reward:.2f}, "
                        f"Steps: {total_steps}, "
                        f"Memory: {memory_usage:.1f}%, "
                        f"Time: {elapsed_time:.2f}s"
                    )

                # Periodic checkpointing (also hoisted for the same reason).
                if completed_episodes % save_interval == 0:
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    model_path = os.path.join(
                        save_path,
                        f"dqn_watermelon_ep{completed_episodes}_{timestamp}.pth",
                    )
                    agent.save(model_path)
                    print(f"Model saved to {model_path}")

        # Advance states; keep the old state where next_state is None
        # (terminal without reset — only possible on the very last step).
        states = [s if s is not None else states[i] for i, s in enumerate(next_states)]

        # One optimization step on the replay buffer.
        loss = agent.optimize_model()

        # Periodically sync the target network (modulo window catches the
        # boundary even though total_steps advances by num_envs at a time).
        total_steps += num_envs
        if total_steps % update_target_every < num_envs:
            agent.update_target_network()
            print(f"Updated target network at step {total_steps}")

        # Update the progress-bar readout.
        pbar.set_postfix(
            {
                "reward": np.mean(episode_rewards),
                # A loss of 0.0 is a real value; only None means "no update".
                "loss": loss if loss is not None else "N/A",
                "epsilon": agent.eps_end
                + (agent.eps_start - agent.eps_end)
                * np.exp(-1.0 * agent.steps_done / agent.eps_decay),
            }
        )

    pbar.close()

    # Save the final model.
    final_model_path = os.path.join(save_path, "dqn_watermelon_final.pth")
    agent.save(final_model_path)
    print(f"Final model saved to {final_model_path}")

    # Shut down the worker environments.
    vec_env.close()

    return agent, all_rewards
