import copy
import pickle
import random
import numpy as np
import gym
import torch
import torch.nn as nn
import torch as t
from torch.nn import functional as F
import matplotlib.pyplot as plt
import os
from mcts import mcts_,softmax
import pandas as pd
import openpyxl
from CartPole_v1_Step import CartPoleStep
from Replay_Buffer import Memory
from plotTree import plotTree
from controller_init import controller_init_

# env.close()
plt.rcParams['font.sans-serif'] = ['KaiTi', 'SimHei', 'FangSong']  # CJK font fallback order: KaiTi, then SimHei, then FangSong
plt.rcParams['font.size'] = 12  # default plot font size
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"  # tolerate duplicate OpenMP runtimes (common torch+matplotlib workaround)

# Shared loss: Actor_learn uses it to get -log pi(a|s) from the actor output.
criterion = nn.CrossEntropyLoss()
# NOTE(review): np.dtype() on an arbitrary class normally raises TypeError,
# and `dtype` is never used in this file — confirm this line is intentional.
dtype = np.dtype(mcts_)

# 定义DQN神经网络
# DQN value network: maps a (state, action) feature vector to a scalar Q-value.
class DQNNet(nn.Module):
    def __init__(self, dim):
        """Build a dim -> 30 -> 4 -> 1 fully-connected stack.

        Attribute names fc1/fc2/fc3 are load-bearing: checkpoints saved
        elsewhere (e.g. 'dqn.pt') are restored by these names.
        """
        super(DQNNet, self).__init__()
        self.fc1 = nn.Linear(dim, 30)
        self.fc2 = nn.Linear(30, 4)
        self.fc3 = nn.Linear(4, 1)

    def forward(self, x):
        """Forward pass. `x` is a numpy array; a 1-D vector is promoted to a
        batch of one before conversion to a float tensor."""
        if len(x.shape) == 1:
            x = np.reshape(x, (1, x.shape[0]))
        h = torch.from_numpy(x).float()
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)

    def num_float_features(self, x):
        """Return the number of scalar features per sample (product of all
        non-batch dimensions of `x`)."""
        num_features = 1
        for s in x.size()[1:]:
            num_features *= s
        return num_features

def Dqn_optimizer(pred_net, batch_X, target_Y, IsWeights, optimizer=None):
    """Perform one gradient step of the DQN network on a prioritized batch.

    Parameters
    ----------
    pred_net : nn.Module
        Network producing Q-value predictions from a numpy batch.
    batch_X : np.ndarray
        Batch of (state, action) feature rows.
    target_Y : np.ndarray
        TD targets; reshaped to align with the (batch, 1) predictions.
    IsWeights : np.ndarray
        Importance-sampling weights from the prioritized replay buffer.
    optimizer : torch.optim.Optimizer, optional
        Persistent optimizer over ``pred_net`` parameters. When omitted, a
        fresh Adam is created per call (the original behavior). NOTE: a fresh
        Adam each call resets its moment estimates every step — callers that
        train repeatedly should pass one persistent optimizer instead.

    Returns
    -------
    float
        The importance-weighted mean squared TD error for this batch.
    """
    pred_Y = pred_net(batch_X)

    # Align targets/weights with the (batch, 1) prediction column and move to torch.
    target_Y = np.reshape(target_Y, (pred_Y.shape[0], 1))
    IsWeights = np.reshape(IsWeights, (pred_Y.shape[0], 1))
    target_Y = torch.tensor(target_Y, dtype=torch.float32)
    IsWeights = torch.tensor(IsWeights, dtype=torch.float32)

    # Importance-weighted mean squared error.
    loss = torch.sum(torch.mul(IsWeights, torch.square(pred_Y - target_Y))) / pred_Y.shape[0]

    # Gradient descent step (per-call Adam kept as default for compatibility).
    if optimizer is None:
        optimizer = torch.optim.Adam(pred_net.parameters(), weight_decay=pow(10, -6))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

class Share_layer(nn.Module):
    """Feature layer shared by the Actor and Critic heads.

    A single state_dim -> 20 linear projection followed by leaky-ReLU,
    initialized with N(0, 0.1) weights and a constant 0.1 bias. The
    attribute name `linear1` is preserved for checkpoint compatibility.
    """

    def __init__(self, state_dim):
        super(Share_layer, self).__init__()
        self.linear1 = nn.Linear(state_dim, 20)
        nn.init.normal_(self.linear1.weight, 0, 0.1)
        nn.init.constant_(self.linear1.bias, 0.1)

    def forward(self, out):
        """Project the input tensor and apply leaky-ReLU."""
        return F.leaky_relu(self.linear1(out))



class Actor(nn.Module):
    """Policy head: shared features -> softmax action probabilities."""

    def __init__(self, sl, action_discrete_value):
        """`sl` is the feature module shared with the Critic;
        `action_discrete_value` is the number of discrete actions."""
        super(Actor, self).__init__()
        self.share_layer = sl
        self.linear2 = nn.Linear(20, action_discrete_value)

    def forward(self, x):
        """Map a numpy state to action probabilities.

        A 1-D state vector is promoted to a batch of one; the output row of
        shape (batch, n_actions) is used to sample actions by probability.
        """
        if len(x.shape) == 1:
            x = np.reshape(x, (1, x.shape[0]))
        features = self.share_layer(t.from_numpy(x).float())
        return F.softmax(self.linear2(features), dim=1)


class Critic(nn.Module):
    """Value head: shared features -> scalar state value V(s)."""

    def __init__(self, sl):
        """`sl` is the feature module shared with the Actor."""
        super(Critic, self).__init__()
        self.share_layer = sl
        self.linear2 = nn.Linear(20, 1)

    def forward(self, x):
        """Map a numpy state to a (batch, 1) value estimate.

        A 1-D state vector is promoted to a batch of one before conversion
        to a float tensor.
        """
        if len(x.shape) == 1:
            x = np.reshape(x, (1, x.shape[0]))
        features = self.share_layer(t.from_numpy(x).float())
        return self.linear2(features)

class ac_controller():
    """Actor–critic controller with MCTS lookahead, a DQN target network,
    and a prioritized replay buffer.

    ``controller_init_`` is expected to populate ``self.env`` and
    ``self.conf`` in place; ``conf`` is later accessed by attribute
    (``self.conf.gamma`` etc.), so it presumably replaces the initial dict
    with a namespace-like object — TODO confirm against controller_init_.
    """

    def __init__(self, config_name):
        self.env = ''
        self.conf = {}
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # check-and-raise would be safer for initialization validation.
        assert controller_init_(self, config_name) == True
        self.env.reset()
        # [state dimension, number of discrete actions]
        self.dim = [self.env.state.shape[0],self.env.action_space.n]


    # Sample an action from a probability distribution over actions.
    def choose_action(self, prob, action_discrete_value):
        """Draw one action index from `prob`.

        `prob` may be a numpy array or a torch tensor; tensors are detached
        and flattened before sampling with np.random.choice.
        """
        # print(prob)
        if isinstance(prob,np.ndarray):
            prob = prob  # already numpy; no conversion needed
        else:
            prob = prob.detach().numpy()
            prob = np.reshape(prob,(max(prob.shape)))
        action = np.random.choice(a = action_discrete_value, p = prob)
        return action

        # action = np.random.choice(a = 6, p = prob[0].detach().numpy())
        # v = random.uniform(0, 1)
        # p, index = t.topk(prob, 1, dim = 1)

        # # epsilon-greedy alternative:
        # if v > eplison:
        #     # take the action with the largest estimated value
        #     action = index[0][0].item()
        # else:
        #     # otherwise act randomly
        #     action = random.randint(0, 1)

        # return action

    # Compute the Actor's policy-gradient loss and take one optimizer step.
    def Actor_learn(self, optim, critic, V_s_, s, s_, a, r, prob, IsWeight):

        '''
        One policy-gradient step for the Actor using the TD advantage
        estimated from the Critic (evaluated without gradients).

        Parameters
        ----------
        optim : torch.optim.Optimizer
            Optimizer over the Actor's parameters.
        critic : Critic instance
            Used (under no_grad) to estimate V(s).
        V_s_ : float scalar
            Bootstrap value of the next state (0 at terminal states).
        s : float array
            State at the current time step.
        s_ : float array
            State at the next time step.
        a : int scalar
            Action taken in state s.
        r : float scalar
            Reward received at the current time step.
        prob : tensor
            Actor network output for s, fed to the cross-entropy loss.
            NOTE(review): CrossEntropyLoss applies log-softmax internally,
            but `prob` is already softmax output — the scores get normalized
            twice; confirm this is intended.
        IsWeight : float
            Importance-sampling weight from the prioritized replay buffer.

        Returns
        -------
        The Actor loss (advantage-weighted cross-entropy, a tensor).

        '''
        with t.no_grad():
            V_s = critic(s)
        # Cross-entropy of the chosen action ~ -log pi(a|s)
        # (modulo the double-softmax noted above).
        a = t.tensor([a]).long()
        logp_a = criterion(prob, a)

        # Importance-weighted TD advantage, detached to a plain scalar so
        # only the policy term carries gradients.
        l = IsWeight*(r + self.conf.gamma * V_s_ - V_s)
        l = l.item()
        loss = l * logp_a
        # print('Actor loss is :', loss)
        optim.zero_grad()

        loss.backward()
        optim.step()

        return loss


    def Critic_learn(self, optim, critic, V_s_, s, r, s_, IsWeight):

        '''
        One TD(0) update step for the Critic network.

        Parameters
        ----------
        optim : torch.optim.Optimizer
            Optimizer over the Critic's parameters.
        critic : Critic instance
            Estimates the state value at each time step.
        V_s_ : float scalar
            Bootstrap value of the next state (0 at terminal states).
        s : float array
            State at the current time step.
        r : float scalar
            Reward received at the current time step.
        s_ : float array
            State at the next time step (unused here; V_s_ is passed in).

        Returns
        -------
        The Critic loss (importance-weighted squared TD error, a tensor).
        '''

        V_s = critic(s)

        loss = IsWeight * (r + self.conf.gamma * V_s_- V_s)**2
        optim.zero_grad()
        loss.backward()

        optim.step()

        return loss

    def classify(self, actor):
        """Run one greedy evaluation episode and return its length in steps."""
        state = self.env.reset()
        done = False
        step = 0

        while not done:
            step += 1

            # env.render()
            state = np.expand_dims(state, axis=0)
            # Pick the greedy (argmax-probability) action.
            with t.no_grad():
                prob = actor(state)
            action = prob.argmax().detach().numpy()
            state_, reward, done, info = self.env.step(action)
            state = state_
        return step


    # def classify(env,critic):
    #     state = env.reset()
    #     done = False
    #     step = 0
    #
    #     while not done:
    #         step += 1
    #
    #         # env.render()
    #         state = np.expand_dims(state, axis=0)
    #
    #         # choose the action by comparing one-step value estimates
    #         with t.no_grad():
    #             # prob = actor(state)
    #             state_, reward, done, info = envmodel.step(state,np.array([0]))
    #             V1 = critic(state_)
    #             state_, reward, done, info = envmodel.step(state,np.array([1]))
    #             V2 = critic(state_)
    #         if V1<V2:
    #             action = 1
    #         else:
    #             action = 0
    #
    #         state_, reward, done, info = env.step(action)
    #         if done | (step >= 500):
    #             done = True
    #         state = state_
    #     return step

    def learn(self):
        """Main training loop.

        Fills a prioritized replay buffer with MCTS-wrapped transitions, then
        for each epoch: samples a batch, runs MCTS lookahead per transition,
        updates Critic, Actor and the DQN target network, refreshes the
        buffer, evaluates the greedy policy, and periodically saves weights
        and search-tree plots.
        """

        # Temporal-difference learning: faster and more stable than pure
        # Monte-Carlo; TD and MC each have their own strengths.
        sl = Share_layer(self.dim[0])
        actor = Actor(sl,self.dim[1])
        critic = Critic(sl)
        actor.train()
        critic.train()

        # target_critic = DQNNet().cuda()
        # target_critic.load_state_dict(state_dict=torch.load('dqn.pt'))

        # Frozen critic snapshot handed to MCTS for stable value targets.
        freeze_critic = copy.deepcopy(critic)

        target_critic = DQNNet(self.dim[0]+1) # +1 input dim is the action; only valid for a single discrete action
        target_critic.load_state_dict(state_dict=torch.load('../dqn.pt'))
        to_target = copy.deepcopy(target_critic)

        # actor.load_state_dict(state_dict=t.load('ac.pt'))
        # critic.load_state_dict(state_dict=t.load('cr.pt'))

        # Two optimizers, one per head.
        actor_optim = t.optim.Adam(actor.parameters(), lr = self.conf.actor_lr)
        critic_optim = t.optim.Adam(critic.parameters(), lr = self.conf.critic_lr)

        train_rewards = []
        test_rewards = []
        actor_losses = []
        critic_losses = []

        classify_rewards = np.empty((self.conf.classify_times,self.conf.epoches))

        # Initialize the prioritized replay buffer.
        pool = Memory(self.conf.replay_buffer_size)

        # Fill the buffer with initial experience.
        state = self.env.reset()
        while 1:
            state = np.reshape(state, (1, self.dim[0]))
            with t.no_grad():
                prob = actor(state)
            # Sample an action from the current policy.
            action = self.choose_action(prob,self.dim[1])
            state_, reward, done, info = self.env.step(action)
            state_ = np.reshape(state_, (1, self.dim[0]))
            if done:
                # Reward shaping: penalize episode termination.
                reward = -20
                mc = mcts_(state, state_, reward, action, done, critic, freeze_critic, target_critic, actor, self.conf)
                pool.store(mc)
                state = self.env.reset()
            else:
                mc = mcts_(state, state_, reward, action, done, critic, freeze_critic,target_critic, actor, self.conf)
                pool.store(mc)
                state = state_
            if pool.tree.data_pointer == 0: # pointer wrapped back to 0: the buffer is full
                break

        for epoch in range(self.conf.epoches):
            tree_idx, batch, IsWeights = pool.sample(self.conf.batch_size)

            update_datas= []
            abs_errors = np.empty((self.conf.batch_size,))

            critic_loss = 0
            actor_loss = 0
            dqn_loss = 0

            target_times = [0, 0, 0]

            batch_X = np.empty((batch.shape[0],self.dim[0]+1))
            target_Y = np.empty((batch.shape[0],1))


            # Run MCTS on each sampled transition and rebuild the learning batch.
            for i in range(batch.shape[0]):

                mcts = batch[i, 0]
                # prob[i] = node.static_root_treenode.pi
                # with torch.no_grad():
                #     prob[i] = actor(node.static_root_treenode.state)
                # # choose the action
                # # prob[i, 1] = 1 - prob[i, 0]
                # # action = choose_action(prob[i])
                # actions[i] = action
                state = mcts.last_state
                reward = mcts.reward
                action = mcts.action
                state_ = mcts.static_root_treenode.state
                done = mcts.done
                # DQN input row is (state, action); target bootstraps via the
                # frozen copy `to_target` on the greedy next action.
                batch_X[i,:] = np.append(state,action)
                next_state_action = mcts.static_root_treenode.pi.argmax()
                target_Y[i,0] = reward + self.conf.gamma * to_target(np.append(state_,next_state_action))

                # dqn_loss += Dqn_optimizer(target_critic, batch_X[i,:], target_Y[i,0], IsWeights[i,0])

                prob = actor(state)

                if done:
                    # Terminal transition: no bootstrap (V_s_ = 0); generate a
                    # fresh replacement transition from a reset environment.
                    init_state = self.env.reset()
                    init_state = np.reshape(init_state, (1, self.dim[0]))
                    V_s_ = 0
                    with torch.no_grad():
                        sub_prob = actor(init_state)
                    sub_action = self.choose_action(sub_prob,self.dim[1])
                    init_state_, sub_reward, done, info = self.env.step(sub_action)
                    init_state_ = np.reshape(init_state_, (1, self.dim[0]))
                    # target_V_s_ = 0
                    if done:
                        sub_reward = -20
                    update_datas.append(
                        mcts_(init_state, init_state_, sub_reward, sub_action, done, critic, freeze_critic,target_critic, actor,
                              self.conf))
                else:
                    init_state = state_

                    # Expand/refresh the search tree, then bootstrap from its
                    # root value and step down to the sampled child.
                    mcts.control()
                    target_times = mcts.refresh(target_times)

                    V_s_ = mcts.static_root_treenode.value.item()
                    with torch.no_grad():
                        sub_prob = actor(init_state)
                    sub_action = self.choose_action(sub_prob,self.dim[1])
                    sub_reward = mcts.static_root_treenode.rewards[sub_action]
                    done = mcts.static_root_treenode.children[sub_action].done
                    sub_mcts = mcts_(init_state, init_state, sub_reward, sub_action, done, critic, freeze_critic,target_critic, actor,
                                     self.conf)
                    sub_mcts.static_root_treenode = mcts.static_root_treenode.children[sub_action]

                    update_datas.append(sub_mcts)

                # Learn: critic first, then actor.
                # NOTE(review): Critic_learn returns a (1,1) tensor; storing
                # abs() of it into a float array relies on implicit scalar
                # conversion — confirm this works for the tensor shape.
                abs_errors[i] = abs(self.Critic_learn(critic_optim, critic, V_s_, state, reward, state_, IsWeights[i,0]))
                critic_loss += abs_errors[i]
                actor_loss += self.Actor_learn(actor_optim, critic, V_s_, state, state_, action, reward, prob, IsWeights[i,0])

            dqn_loss = Dqn_optimizer(target_critic, batch_X, target_Y, IsWeights)
            # Periodically sync the frozen network copies.
            if ((epoch + 1) % self.conf.copy_iter) == 0:
                to_target = copy.deepcopy(target_critic)
                freeze_critic = copy.deepcopy(critic)
            # next_batch = np.array(sample_batch+other_batch) # split in two, put both halves into the buffer
            update_datas = np.array(update_datas)  # only the transitions consumed this round go back into the buffer
            for data in update_datas:
                data.reinit()

            pool.batch_update(tree_idx, abs_errors, update_datas)
            # Evaluate the greedy policy over several seeded episodes.
            mean_step = 0
            for j in range(classify_rewards.shape[0]):
                self.env.seed(j)
                classify_step = self.classify(actor)
                classify_rewards[j, epoch] = classify_step
                mean_step += classify_step
            mean_step = int(mean_step / classify_rewards.shape[0])
            critic_loss = critic_loss.item() / self.conf.batch_size
            actor_loss = actor_loss.item() / self.conf.batch_size
            # dqn_loss = dqn_loss/self.conf.batch_size

            test_rewards.append(mean_step)
            critic_losses.append(critic_loss)
            actor_losses.append(actor_loss)
            print('----epoch is----', epoch, '----classify step nums is----', mean_step, '----dqn_loss----', dqn_loss)

            # Every 50 epochs, dump the first two search trees for inspection.
            if (epoch + 1) % 50 == 0:
                for i in range(2):
                    plotTree(update_datas[i].static_root_treenode,
                             '../treeFigure/' + str(int(epoch / 50)) + '-' + str(i) + '.html')

            if (epoch + 1) % 200 == 0:
                # NOTE(review): divisor 500 does not match the %200 condition
                # above; `times` only feeds the commented-out Excel export.
                times = int((epoch + 1) / 500)
                # Save trained network weights.
                t.save(actor.state_dict(), '../ac.pt')
                t.save(critic.state_dict(), '../cr.pt')

                # Save experiment results (disabled).
                # if os.path.exists('ex_data_3.xlsx'):
                #     book = openpyxl.load_workbook('ex_data_3.xlsx')
                # else:
                #     book = openpyxl.Workbook()
                # writer = pd.ExcelWriter('ex_data_3.xlsx', engine='openpyxl')
                # writer.book = book
                # writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
                # total_ex_data = [test_rewards, critic_losses, actor_losses]
                # total_ex_data = np.transpose(np.array(total_ex_data))
                # total_ex_data = pd.DataFrame(total_ex_data)
                # total_ex_data.columns = ['test_rewards', 'critic_losses', 'actor_losses']
                # total_ex_data.to_excel(writer, sheet_name="sheet" + str(times), index=False)
                # writer.save()
                # writer.close()  # for the read-only workbook in this program

                # fig = plt.figure(figsize=(12, 15))
                # ax = plt.subplot(2, 2, 2)
                # ax.plot(test_rewards)
                # ax.set_title("test reward")
                # ax = plt.subplot(2, 2, 3)
                # ax.plot(critic_losses)
                # ax.set_title("Critic loss")
                # ax = plt.subplot(2, 2, 4)
                # ax.plot(actor_losses)
                # ax.set_title("Actor loss")
                # plt.show()

        # Save the final trained network weights and per-seed evaluation data.

        t.save(actor.state_dict(), '../ac.pt')
        t.save(critic.state_dict(), '../cr.pt')
        for i in range(classify_rewards.shape[0]):
            np.save("../exdata/"+self.conf.env+"/data"+str(i),classify_rewards[i,:])

        # critic_losses = np.reshape(np.transpose(np.array(critic_losses)),(self.conf.epoches,1))
        # actor_losses = np.reshape(np.transpose(np.array(actor_losses)),(self.conf.epoches,1))
        # critic_losses = np.tile(critic_losses,(self.conf.classify_times,1))
        # actor_losses = np.tile(actor_losses,(self.conf.classify_times,1))
        # xdata = np.tile(np.array(range(1, self.conf.epoches+1)), (self.conf.classify_times, 1))
        # xdata = np.reshape(np.transpose(xdata), (classify_rewards.shape[0] * classify_rewards.shape[1], 1))
        # classify_rewards = np.reshape(np.transpose(classify_rewards), (classify_rewards.shape[0] * classify_rewards.shape[1], 1))
        # data = np.concatenate((xdata,actor_losses,critic_losses,classify_rewards),axis=1)
        #
        # data = pd.DataFrame(data,columns=['x','actor_loss','critic_loss','classify_rewards'])
        # data.to_csv('../exdata/data.csv')

