import copy
import numpy as np
from gym import Env
import matplotlib.pyplot as plt

from models import DQN

import mindspore as ms


class Agent(Env):
    """Tabular n-step SARSA / expected-SARSA agent (with an experimental
    DQN branch) on a windy grid-world.

    The world is a grid of ``bound[1]`` rows by ``bound[3]`` columns; a
    column-dependent "wind" (``self.env``) pushes the agent toward row 0
    on every step.  Episodes start in the middle row of column 0 and end
    when the goal cell ``self.end`` is reached.
    """

    def __init__(self, gamma, lr=1e-3, batch=256, n=1) -> None:
        """
        Args:
            gamma: discount factor used in all return computations.
            lr: step size for the tabular Q-value update.
            batch: mini-batch size for the DQN branch of ``run``.
            n: lookahead horizon of the n-step returns.
        """
        super().__init__()

        self.gamma = gamma
        self.batch = batch
        self.n = n
        self.lr = lr

        # Grid limits: rows in [bound[0], bound[1]), cols in [bound[2], bound[3]).
        self.bound = [0, 7, 0, 10]
        s = int((self.bound[1] - 1) / 2)
        # Start state: middle row, leftmost column.
        self.state = np.array([s, 0], dtype=np.int32)

        # Goal cell: middle row, third column from the right edge.
        self.end = np.array([s, self.bound[3] - 3], np.int32)

        # Wind strength per column (rows the agent is pushed up each step).
        self.env = np.array([0, 0, 0, 1, 1, 1, 2, 2, 1, 0], np.int32)

        # The four moves — up, down, left, right — as (row, col) deltas.
        self.real_intention = np.array(
            [[-1, 0], [1, 0], [0, -1], [0, 1]], dtype=np.int32)
        self.T = 0
        # State values, a uniform policy, and the tabular action values.
        self.v = np.zeros(self.bound[1] * self.bound[3], dtype=np.float32)
        self.pi = np.ones(4, dtype=np.float32) / 4
        self.q = np.zeros((self.bound[1] * self.bound[3], 4), np.float32)
        # Seed the goal state's action values so the greedy policy is
        # drawn toward it before any learning has happened.
        self.q[self.get_idx(self.end), :] = 1

        # Online network and its (manually synced) target network.
        self.Q = DQN(3)
        self.tar = DQN(3)

    def get_idx(self, state):
        """Flatten a (row, col) state into a single Q-table row index."""
        return state[0] * self.bound[3] + state[1]

    def reset(self):
        """Return to the start state and zero the step counter."""
        s = int((self.bound[1] - 1) / 2)
        self.state = np.array([s, 0], dtype=np.int32)
        self.T = 0

        return self.state

    def cal(self, rewards, states, actions, t=0):
        """n-step SARSA return G_t.

        Sums up to ``n`` discounted rewards from time ``t`` and, when the
        full horizon is available, bootstraps with Q(s_{t+n}, a_{t+n}).
        """
        e = min(self.n, self.T - t)
        g = 0.0

        for i in range(e):
            g += (self.gamma ** i) * rewards[t + i]

        if e == self.n:
            # Bootstrap from the tabular estimate at the horizon.
            # (get_idx reads only state[0]/state[1], so the action does
            # not need to be appended to the state first.)
            idx1 = self.get_idx(states[t + self.n])
            idx2 = actions[t + self.n]
            g += (self.gamma ** self.n) * self.q[idx1, idx2]

        return g

    def cal_dqn(self, data: ms.Tensor, reward: ms.Tensor, actions):
        """One-step TD targets r + gamma * Q_target(s', a') for a batch."""
        acts = ms.Tensor(actions, ms.int32)
        idcs = list(range(self.batch))
        x1 = self.gamma * self.tar(data)[idcs, acts]
        return x1 + reward

    def esarsa(self, rewards, states, t=0):
        """n-step expected-SARSA return G_t.

        The bootstrap term is the expectation of Q(s_{t+n}, .) under a
        softmax policy derived from the current Q values.
        """
        e = min(self.n, self.T - t)
        g = 0.0
        for i in range(e):
            g += (self.gamma ** i) * rewards[t + i]

        if e == self.n:
            idx = self.get_idx(states[t + self.n])
            # Expectation is sum_a pi(a|s) * Q(s, a).  The previous
            # np.mean() silently divided the expectation by the number
            # of actions (4).
            probs = np.exp(self.q[idx, :]) / np.exp(self.q[idx, :]).sum()
            g += (self.gamma ** self.n) * np.sum(probs * self.q[idx, :])

        return g

    def step(self, action):
        """Apply wind, then the chosen move; clip the result to the grid.

        Returns the gym-style tuple ``(state, reward, done, info)``.
        The reward is always -1, so maximising return minimises episode
        length.
        """
        flag = False
        r = -1
        # Wind displaces the agent toward row 0 before the move applies.
        self.state[0] -= self.env[self.state[1]]
        self.state += (self.real_intention[action])
        self.state[0] = np.clip(
            self.state[0], self.bound[0], self.bound[1] - 1)
        self.state[1] = np.clip(
            self.state[1], self.bound[2], self.bound[3] - 1)

        if (self.state == self.end).all():
            flag = True

        return (self.state, r, flag, None)

    def select_action(self, state, e=0.1):
        """Epsilon-greedy action selection over the tabular Q values.

        With probability ``e`` a uniformly random *non-greedy* action is
        chosen (falling back to the greedy set when every action is
        greedy); otherwise ties among the greedy actions are broken
        uniformly at random.
        """
        idx = self.get_idx(state)
        p = np.random.rand()

        maxa = np.max(self.q[idx, :])

        alter_as = np.where(self.q[idx, :] == maxa)[0]
        if p < e:
            a1 = list(set(range(4)) - set(alter_as))
            if len(a1) == 0:
                a = np.random.choice(alter_as)
            else:
                a = np.random.choice(a1)
        else:
            a = np.random.choice(alter_as)

        return a

    def updata(self, state, action, g):
        """Move Q(state, action) toward the target return ``g``.

        NOTE(review): the name looks like a typo of ``update``; it is
        kept unchanged for backward compatibility with existing callers.
        """
        idx = self.get_idx(state)
        self.q[idx, action] += self.lr * (g - self.q[idx, action])

    def pack(self, states, actions, t=0):
        """Stack ``batch + 1`` consecutive (row, col, action) triples
        starting at time ``t`` into a float32 MindSpore Tensor."""
        res = []
        for i in range(self.batch + 1):
            one = [states[t + i][0], states[t + i][1], actions[t + i]]
            res.append(one)

        return ms.Tensor(res, ms.float32)

    def run(self, epoch):
        """Roll out ``epoch`` episodes and return per-episode step counts.

        NOTE(review): the DQN branch below only builds targets and
        network outputs and prints their shapes — no loss/optimizer step
        is performed yet, so the networks never learn.
        """
        records = []
        for i in range(epoch):
            self.reset()
            states = [list(self.state)]

            a = self.select_action(states[-1])
            actions = [a]
            rewards = []
            t = 0

            lst = list(self.state)
            lst.append(a)
            one = [copy.deepcopy(lst)]
            idx = 0
            while True:
                # Execute the current action, record the transition,
                # then pick the next action from the new state.
                infos = self.step(a)
                rewards.append(infos[1])
                states.append(list(infos[0]))

                a = self.select_action(states[-1])
                actions.append(a)
                self.T += 1

                lst = copy.deepcopy(states[-1])
                lst.append(a)
                one.append(copy.deepcopy(lst))

                # Once a full sliding window is available, build a DQN
                # mini-batch from the last ``batch + 1`` transitions.
                if len(states) >= self.batch + 1:
                    ds = self.pack(states, actions, idx)
                    data = ds[1:, :]
                    targets = self.cal_dqn(data, ms.Tensor(
                        rewards[idx:], dtype=ms.float32), actions[idx + 1:])

                    idcs = list(range(self.batch))
                    out = self.Q(ds[:-1, :])[idcs, actions[idx + 1:]]
                    print(targets.shape)
                    print(out.shape)
                    one = []
                    idx += 1

                if infos[2]:
                    break

            records.append(len(actions))

        return records

    def __call__(self, epoch):
        """Run ``epoch`` episodes and plot the episode-length history."""
        records = self.run(epoch)
        plt.plot(records)
        plt.show()


if __name__ == "__main__":
    # gamma=0.9, lr=0.5, 8-step returns; run a single episode.
    agent = Agent(0.9, 0.5, n=8)
    agent(1)
