import torch.nn as nn
import torch.nn.functional as F
import torch
import gym
import numpy as np
import torch.optim as optim
import random
import collections

# Environment setup: unwrap CartPole so episodes are not cut off by the
# TimeLimit wrapper and the pole/cart thresholds are directly accessible.
env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n              # number of discrete actions (2 for CartPole)
N_STATES = env.observation_space.shape[0]   # length of the state vector (4 for CartPole)
lr = 0.01      # Adam learning rate for the eval network
gamma = 0.9    # discount factor for the bootstrapped Q targets
# NOTE(review): with greedy = 1, `np.random.uniform() < greedy` in
# choose_action is always true, so the random-exploration branch is dead
# code and the agent never explores — confirm this is intended (DQN
# tutorials typically use ~0.9 here).
greedy = 1
Max_memory = 2000   # replay buffer capacity; training starts only once it is full
batch_size = 32     # minibatch size drawn from the replay buffer per learn() step

class Net(nn.Module):
    """Q-value MLP: maps a state vector to one Q-value per action.

    Architecture: N_STATES -> 64 -> 32 -> N_ACTIONS with ReLU activations
    on the hidden layers and a linear output head.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are part of the state_dict keys, which the DQN
        # agent relies on when syncing eval_net -> target_net.
        self.l1 = nn.Linear(N_STATES, 64)
        self.l2 = nn.Linear(64, 32)
        self.predict = nn.Linear(32, N_ACTIONS)

    def forward(self, x):
        """Return raw (unnormalized) Q-values for state batch ``x``."""
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        return self.predict(hidden)


class ReplayMemory():
    """Fixed-capacity FIFO experience buffer with uniform minibatch sampling.

    Stores (state, action, reward, next_state) transitions in a deque; once
    ``max_size`` is reached the oldest transition is dropped automatically.
    """

    def __init__(self, max_size, batch_size):
        self.max_size = max_size
        self.buffer = collections.deque(maxlen=max_size)  # auto-evicts oldest
        self.batch_size = batch_size

    def append(self, exp):
        """Store one transition: a length-4 sequence (s, a, r, s')."""
        self.buffer.append(exp)

    def sample(self):
        """Draw ``batch_size`` transitions uniformly without replacement.

        Returns a 4-tuple of tensors:
        states (float), actions (long, shape (batch, 1)),
        rewards (float, shape (batch, 1)), next_states (float).
        """
        batch = random.sample(self.buffer, self.batch_size)
        # Transpose the list of transitions into per-field sequences.
        states, actions, rewards, next_states = zip(*batch)
        return (torch.FloatTensor(np.array(states)),
                torch.LongTensor(np.array(actions)).view(-1, 1),
                torch.FloatTensor(np.array(rewards)).view(-1, 1),
                torch.FloatTensor(np.array(next_states)))

    def __len__(self):
        return len(self.buffer)

class DQN():
    """DQN agent: an online (eval) network trained every step, plus a target
    network that is synced from it periodically to stabilize the bootstrap
    targets."""

    def __init__(self):
        # Two networks with identical architecture; target_net is only ever
        # written to via load_state_dict from eval_net.
        self.eval_net, self.target_net = Net(), Net()
        self.learn_step_counter = 0      # number of learn() calls so far
        self.Target_replace_iter = 100   # sync target_net every 100 learn() calls
        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=lr)
        self.loss_func = nn.MSELoss()

    def choose_action(self, s):
        """Return an action index (int) for state ``s``, epsilon-greedy.

        NOTE(review): with the module-level ``greedy`` set to 1, the random
        branch below is unreachable, so the agent never explores.
        """
        s = torch.FloatTensor(s)
        if np.random.uniform() < greedy:
            # Pure inference: no_grad avoids building an autograd graph.
            with torch.no_grad():
                actions_value = self.eval_net(s)
            # int() keeps the return type consistent with the random branch
            # (the original returned a 0-d numpy array here).
            action = int(torch.argmax(actions_value).item())
        else:
            action = np.random.randint(0, N_ACTIONS)
        return action

    def learn(self):
        """Sample a minibatch from the global replay memory and take one
        gradient step on eval_net; periodically sync target_net."""
        if self.learn_step_counter % self.Target_replace_iter == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1
        s, a, r, ss = memory.sample()

        # Q(s, a) for the actions actually taken in the sampled transitions.
        q_eval = self.eval_net(s).gather(1, a)
        # Bootstrap value from the frozen target network (no gradient flow).
        q_next = self.target_net(ss).detach()
        # NOTE(review): transitions carry no `done` flag, so terminal states
        # are still bootstrapped through; a proper fix requires storing
        # `done` in the replay memory and masking q_next here.
        q_target = r + gamma * q_next.max(1)[0].view(batch_size, 1)

        loss = self.loss_func(q_eval, q_target)
        # Standard step: zero accumulated grads, backprop, apply update.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

# Training driver: run 400 episodes, store shaped-reward transitions, and
# start learning once the replay buffer is full.
memory = ReplayMemory(Max_memory, batch_size)
dqn = DQN()
print('\nCollecting experience...')
for i_episode in range(400):
    s = env.reset()
    ep_r = 0
    while True:
        env.render()
        a = dqn.choose_action(s)

        s_, r, done, info = env.step(a)

        # Reward shaping: replace the environment's constant +1 reward with
        # a denser signal that rewards keeping the cart centered (r1) and
        # the pole upright (r2); both terms go negative near the failure
        # thresholds.
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        memory.append([s, a, r, s_])

        ep_r += r
        # Learning begins only once the buffer is completely full; use the
        # __len__ protocol the class defines instead of reaching into the
        # internal deque.
        if len(memory) >= memory.max_size:
            dqn.learn()
            if done:
                print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))
        if done:
            break
        s = s_












