import matplotlib.pyplot as plt
import gymnasium as gym
from utils import MLP,ReplayBuffer
from dqn import DQN
import torch

def train(env, agent, n_episodes=300, max_steps=5000):
    """Train a DQN agent on a Gymnasium environment.

    Args:
        env: Gymnasium environment; ``reset()`` returns ``(obs, info)`` and
            ``step(action)`` returns ``(obs, reward, terminated, truncated, info)``.
        agent: DQN agent exposing ``sample_action``, ``update``, ``buffer.push``,
            ``epsilon``, and state-dict-compatible ``policy_net``/``target_net``.
        n_episodes: number of training episodes (default 300, same as before).
        max_steps: per-episode step cap (default 5000, same as before).

    Returns:
        dict with key ``'rewards'``: the cumulative reward of each episode.
    """
    print("开始训练！")
    rewards = []  # cumulative reward of every episode
    for i_ep in range(n_episodes):
        ep_reward = 0  # cumulative reward within this episode
        state, _ = env.reset()  # Gymnasium reset() returns (observation, info)
        for _ in range(max_steps):
            action = agent.sample_action(state)
            next_state, reward, terminated, truncated, _ = env.step(action)
            agent.buffer.push(state, action, reward, next_state, terminated, truncated)
            state = next_state  # advance to the next state
            agent.update()  # one learning step on the replay buffer
            ep_reward += reward
            if terminated or truncated:
                break
        # Hard-sync the target network once per episode.
        agent.target_net.load_state_dict(agent.policy_net.state_dict())
        rewards.append(ep_reward)
        if i_ep % 50 == 0:
            # Bug fix: episode total was hard-coded as 200 while the loop ran
            # n_episodes episodes; also fixed the "Epislon" typo.
            print(f"回合：{i_ep+1}/{n_episodes}，奖励：{ep_reward:.2f}，Epsilon：{agent.epsilon:.3f}")
    print("完成训练！")
    env.close()
    return {'rewards': rewards}


def test(env, agent, n_episodes=20, max_steps=200):
    """Evaluate a trained agent greedily (no exploration, no learning).

    Args:
        env: Gymnasium environment (same protocol as in ``train``).
        agent: agent exposing ``predict_action(state)`` for greedy action choice.
        n_episodes: number of evaluation episodes (default 20, same as before).
        max_steps: per-episode step cap (default 200, same as before).

    Returns:
        dict with key ``'rewards'``: the cumulative reward of each episode.
    """
    print("开始测试！")
    rewards = []  # cumulative reward of every episode
    for i_ep in range(n_episodes):
        ep_reward = 0  # cumulative reward within this episode
        state, _ = env.reset()  # Gymnasium reset() returns (observation, info)
        for _ in range(max_steps):
            action = agent.predict_action(state)  # greedy action
            next_state, reward, terminated, truncated, _ = env.step(action)
            state = next_state
            ep_reward += reward
            if terminated or truncated:
                break
        rewards.append(ep_reward)
        print(f"回合：{i_ep+1}/{n_episodes}，奖励：{ep_reward:.2f}")
    if rewards:
        # Bug fix: the average was hard-coded as total/20 regardless of the
        # actual episode count; derive it from the collected rewards instead.
        print("完成测试，平均奖励为：", sum(rewards) / len(rewards))
    env.close()
    return {'rewards': rewards}

def plot_rewards(r1, r2, save_path='/home/WangHaobin/DeepRL/codes/DQN/ddqn_epochs_300_steps_5000.png'):
    """Plot train/test reward curves on one figure and save it to disk.

    Args:
        r1: per-episode rewards from training.
        r2: per-episode rewards from testing.
        save_path: output image path (default keeps the original hard-coded
            location for backward compatibility).
    """
    plt.figure()
    # Fixed title spacing ("testDQN" -> "test DQN") and dropped the pointless
    # f-string prefix (no placeholders); also fixed the "epsiodes" typo.
    plt.title("train and test DQN for CartPole-v1")
    plt.xlabel('episodes')
    plt.plot(r1, label='train')
    plt.plot(r2, label='test')
    plt.legend()
    plt.savefig(save_path)

if __name__ == "__main__":
    # Fixed seed for reproducible weight initialisation and action sampling.
    torch.manual_seed(3407)
    environment = gym.make("CartPole-v1")
    # CartPole-v1: discrete action space, 1-D observation vector.
    num_actions = environment.action_space.n
    num_states = environment.observation_space.shape[0]
    network = MLP(num_states, num_actions)
    replay = ReplayBuffer(100000, num_states)
    # NOTE(review): positional hyperparameters follow the DQN constructor —
    # presumably gamma / epsilon schedule / sync & batch sizes / learning
    # rate; confirm against dqn.py.
    agent = DQN(network, replay, num_actions, 0.95, 0.95, 0.01, 500, 100, 0.0001)
    train_results = train(environment, agent)
    test_results = test(environment, agent)
    plot_rewards(train_results['rewards'], test_results['rewards'])