from collections import deque
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import gymnasium as gym
from copy import deepcopy

from light_rl.utils.utils import load_config, get_absolute_path, get_now_time, get_device, build_mlp, set_seed
from light_rl.utils.buffer import create_reply_buffer
from light_rl.agent.agent_td3 import AgentTD3
from light_rl.model.model import Model


class ActorNet(nn.Module):
    """Deterministic policy network: maps a state to a bounded continuous action.

    The MLP output is squashed with tanh and scaled so every action component
    lies in [-action_bound, action_bound].
    """

    def __init__(self, state_dim, action_dim, hidden_dims, action_bound):
        super().__init__()
        # Scalar (or broadcastable) magnitude limit applied after tanh squashing.
        self.action_bound = action_bound
        self.net = build_mlp([state_dim, *hidden_dims, action_dim])

    def forward(self, x):
        # torch.tanh is the functional form; the original nn.Tanh()(x) built a
        # throwaway nn.Module instance on every forward pass.
        return torch.tanh(self.net(x)) * self.action_bound


class CriticNet(nn.Module):
    """Q-value network: maps an input feature vector to Q-value outputs.

    NOTE(review): despite the parameter names, the caller passes the
    concatenated (state + action) width as `state_dim` and the output width
    (1, a scalar Q-value) as `action_dim` — they are input/output sizes here.
    """

    def __init__(self, state_dim, action_dim, hidden_dims):
        super().__init__()
        layer_sizes = [state_dim, *hidden_dims, action_dim]
        self.net = build_mlp(layer_sizes)

    def forward(self, x):
        q = self.net(x)
        return q


# Load configuration (paths are resolved relative to this script's location)
config_path = get_absolute_path(__file__, "./config/config.yaml")
config = load_config(config_path)
model_path = get_absolute_path(__file__, config.model_dir)

# Seed RNGs before creating the env and networks, for reproducibility
set_seed(config.seed)

# Create the training environment (no rendering during training)
env = gym.make("InvertedPendulum-v4")

# Build networks, optimizers and Model wrappers
device = get_device(config.device)

# Actor maps a state to an action in [-action_bound, action_bound]
actor_net = ActorNet(config.env.state_dim, config.env.action_dim, config.model.actor.hidden_dims,
                     config.env.action_bound).to(device)
actor_optimizer = torch.optim.Adam(actor_net.parameters(), lr=config.model.actor.learning_rate)
actor_model = Model(actor_net, actor_optimizer)

# Critic takes the concatenated (state, action) vector and outputs a scalar Q-value
critic_net = CriticNet(config.env.state_dim + config.env.action_dim, 1, config.model.critic.hidden_dims).to(device)
critic_optimizer = torch.optim.Adam(critic_net.parameters(), lr=config.model.critic.learning_rate)
critic_model = Model(critic_net, critic_optimizer, torch.nn.MSELoss())
# TD3 uses twin critics; the second is an independent deep copy of the first
critic2_model = deepcopy(critic_model)
# Create the agent
agent = AgentTD3(actor_model, critic_model, critic2_model, config_path)
# Restore a previous training session (disabled by default)
# agent.load(model_path)

# Create the TensorBoard writer and a rolling window of recent episode rewards
writer = SummaryWriter(get_absolute_path(__file__, f"logs/{get_now_time()}"))
reward_collection = deque(maxlen=config.train.reward_collection_size)
step = 0
# Training loop
for i in range(config.train.episode):
    state, _ = env.reset()
    state = state.astype(np.float32)
    episode_reward = 0
    while True:
        action = agent.react(state=state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        next_state = next_state.astype(np.float32)
        episode_reward += reward
        done = terminated or truncated
        # Bug fix: only `terminated` marks a true terminal state. On a
        # time-limit truncation the episode ends but next_state's value should
        # still be bootstrapped, so truncation must not be fed to learn().
        agent.learn(state, action, reward, next_state, terminated)
        state = next_state
        step += 1
        # NOTE(review): action is presumably a 1-element array here (1-D
        # continuous action space); add_scalar squeezes it — confirm.
        writer.add_scalar("action", action, step)
        if done:  # episode ends on either termination or truncation
            break
    reward_collection.append(episode_reward)
    writer.add_scalar("reward", episode_reward, i)

    print(f"Step {step} Episode {i} reward: {episode_reward} mean reward: {np.mean(reward_collection)}")
    # Bug fix: only stop once the rolling window is full; otherwise a single
    # lucky early episode could push the mean over the threshold immediately.
    if len(reward_collection) == reward_collection.maxlen and np.mean(reward_collection) > config.train.reward_threshold:
        break

# Save the trained agent (model weights / training state)
agent.save(model_path)

# Evaluation: roll out the learned policy with on-screen rendering
env = gym.make("InvertedPendulum-v4", render_mode="human")
for i in range(config.test.episode):
    obs, _ = env.reset()
    obs = obs.astype(np.float32)
    episode_reward = 0
    done = False
    # Run one episode greedily (train=False disables exploration noise)
    while not done:
        act = agent.react(obs, train=False)
        obs, reward, terminated, truncated, _ = env.step(act)
        obs = obs.astype(np.float32)
        episode_reward += reward
        done = terminated or truncated
    print(f"Episode {i} reward: {episode_reward}")
