import random
from collections import deque
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import gymnasium as gym

from light_rl.utils.utils import load_config, get_absolute_path, get_now_time, get_device, build_mlp, set_seed
from light_rl.agent.agent_dqn import AgentDQN, AgentDQN_OffPolicy
from light_rl.model.model import Model
from light_rl.utils.buffer import create_reply_buffer


class Net(nn.Module):
    """Feed-forward Q-network: maps a state vector to one value per action."""

    def __init__(self, state_dim, action_dim, hidden_dims):
        super().__init__()
        # build_mlp assembles a fully-connected stack from the layer-size list.
        self.net = build_mlp([state_dim, *hidden_dims, action_dim])

    def forward(self, x):
        q_values = self.net(x)
        return q_values


# Load the YAML configuration that lives next to this script.
config_path = get_absolute_path(__file__, "./config/config.yaml")
config = load_config(config_path)
# Checkpoints are written to a timestamped subdirectory of config.model_dir.
model_path = get_absolute_path(__file__, f"{config.model_dir}/{get_now_time()}")

# Fix random seeds for reproducibility.
set_seed(config.seed)

# Create the training environment.
env = gym.make("CartPole-v1")

device = get_device(config.device)
# Create the Q-network, its optimizer, and the model wrapper.
net = Net(config.env.state_dim, config.env.action_dim, config.model.hidden_dims).to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=config.model.learning_rate)
model = Model(net, optimizer, torch.nn.MSELoss())

# TensorBoard writer, logging into a timestamped run directory.
writer = SummaryWriter(get_absolute_path(__file__, f"{config.log_dir}/{get_now_time()}"))

# Create the agent.
agent = AgentDQN_OffPolicy(model, config_path,writer=writer, model_dir=model_path)
# Alternative agent that takes an explicit replay buffer:
# agent = AgentDQN(model, buffer, config_path)
# To resume a previous training run, load its checkpoint:
# agent.load(model_path)

# Rolling window of the most recent episode rewards, used for the stop criterion.
reward_collection = deque(maxlen=config.train.reward_collection_size)
# Training loop.
for i in range(config.train.episode):
    state, _ = env.reset()
    episode_reward = 0
    while True:
        action = agent.react(state=state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        # An episode ends on either termination or truncation (time limit).
        done = terminated or truncated
        agent.learn(state, action, reward, next_state, done)
        state = next_state
        if done:
            break
    writer.add_scalar("reward", episode_reward, i)
    print(f"Episode {i} reward: {episode_reward}")
    # Early stopping: check the rolling-average reward against the threshold.
    reward_collection.append(episode_reward)
    # Fix: only evaluate the criterion once the window is FULL. Previously the
    # mean over a partially-filled deque could stop training after a single
    # lucky episode above the threshold.
    if (len(reward_collection) == config.train.reward_collection_size
            and np.mean(reward_collection) > config.train.reward_threshold):
        break

agent.end_save()
# Fix: close the training environment before a new one is created for testing,
# instead of leaking it on reassignment.
env.close()

# Evaluation: re-create the environment with on-screen rendering and roll out
# the trained agent without learning.
env = gym.make("CartPole-v1", render_mode="human")
for i in range(config.test.episode):
    state, _ = env.reset()
    episode_reward = 0
    while True:
        # NOTE(review): train=False presumably disables exploration/learning —
        # confirm against AgentDQN_OffPolicy.react.
        action = agent.react(state, train=False)
        next_state, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        state = next_state
        done = terminated or truncated
        if done:
            break
    print(f"Episode {i} reward: {episode_reward}")
# Fix: close the environment so the render window and its resources are
# released when the script ends.
env.close()