import torch
import numpy as np
import net


class dqn(object):
    """Deep Q-Network agent: epsilon-greedy action selection, a ring-buffer
    replay pool, and TD-target learning with a periodically synced target net.
    """

    def __init__(self):
        # 10x10 flattened grid plus two extra 2-vectors -> 104 inputs.
        # (What the two 2-vectors encode isn't visible here — presumably
        # agent/goal coordinates; confirm against the environment code.)
        self.STATES_NUM = 10 * 10 + 2 + 2
        self.ACTION_NUM = 4
        self.LEARN_RATE = 0.001
        self.EPSILON = 0.9   # base greedy-action probability
        self.GAMMA = 0.9     # discount factor

        self.BATCH_SIZE = 5000
        self.TNET_REPLACE_ITER = 6000  # target-net sync period (learn steps)

        # Online (q) and target (t) networks share one architecture.
        self.q_net = net.NET(self.STATES_NUM, self.ACTION_NUM)
        self.t_net = net.NET(self.STATES_NUM, self.ACTION_NUM)
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=self.LEARN_RATE)
        self.loss_func = torch.nn.MSELoss()

        # Replay buffer: each row is [s, a, r, s_] flattened.
        self.DS_SIZE = 100000
        self.ds_count = 0     # next write index (wraps around)
        self.ds_st_learn = 0  # flips to 1 once the buffer has filled once
        self.ds_pool = np.zeros((self.DS_SIZE, self.STATES_NUM * 2 + 2))

        self.learn_step = 0

    def choose_action(self, x):
        """Return an epsilon-greedy action for state vector ``x``.

        Acts uniformly at random until the replay buffer has filled once
        (``ds_st_learn == 0``). Afterwards, the greedy probability anneals
        linearly from EPSILON toward 1.0 over the first 200000 learn steps.
        """
        if self.ds_st_learn == 0:
            return np.random.randint(0, self.ACTION_NUM)

        # Linear annealing bonus; after 200k learn steps the policy is
        # effectively fully greedy (probability saturates at/above 1).
        epsilon = (1 - self.EPSILON) * (self.learn_step / 200000)

        if np.random.uniform() < (self.EPSILON + epsilon):
            # Greedy branch: argmax over Q-values. Inference only — no
            # gradients needed, so run under no_grad.
            with torch.no_grad():
                state = torch.FloatTensor(x).unsqueeze(0)
                action_value = self.q_net(state)
            action = int(torch.max(action_value, 1)[1].item())
        else:
            action = np.random.randint(0, self.ACTION_NUM)
        return action

    def store_ds(self, s, a, r, s_):
        """Append one transition (s, a, r, s_) to the ring buffer."""
        self.ds_pool[self.ds_count, :] = np.hstack((s, [a, r], s_))
        self.ds_count += 1
        if self.ds_count >= self.DS_SIZE:
            # Wrap around; learning becomes enabled once the buffer is full.
            self.ds_count = 0
            self.ds_st_learn = 1

    def learn(self):
        """Run one gradient step on a random replay batch (DQN TD target)."""
        self.learn_step += 1

        # Periodically copy the online net into the frozen target net.
        if self.learn_step % self.TNET_REPLACE_ITER == 0:
            self.t_net.load_state_dict(self.q_net.state_dict())

        # Sample BATCH_SIZE rows with replacement from the full buffer
        # (callers are expected to invoke learn() only after it has filled).
        batch_index = np.random.choice(self.DS_SIZE, self.BATCH_SIZE)
        batch_ds = self.ds_pool[batch_index, :]

        b_s = torch.FloatTensor(batch_ds[:, :self.STATES_NUM])
        # Actions go straight to int64 — gather() requires a LongTensor index.
        b_a = torch.LongTensor(batch_ds[:, self.STATES_NUM:self.STATES_NUM + 1])
        b_r = torch.FloatTensor(batch_ds[:, self.STATES_NUM + 1:self.STATES_NUM + 2])
        b_s_ = torch.FloatTensor(batch_ds[:, -self.STATES_NUM:])

        # Q(s, a) for the actions actually taken.
        q = self.q_net(b_s).gather(1, b_a)
        # TD target from the target net; detached so no gradients flow into it.
        # NOTE(review): there is no terminal-state mask — targets bootstrap
        # past episode ends. Fixing that needs a `done` flag in store_ds.
        q_ = self.t_net(b_s_).detach()
        q_t = b_r + self.GAMMA * q_.max(1)[0].reshape(self.BATCH_SIZE, 1)
        loss = self.loss_func(q, q_t)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
