import copy
import random
import numpy as np
import gym
import torch.nn as nn
import torch as t
from torch.nn import functional as F
import matplotlib.pyplot as plt
import os
import pandas as pd

# env.close()
plt.rcParams['font.sans-serif'] = ['KaiTi', 'SimHei', 'FangSong']  # CJK fonts: prefer KaiTi, fall back to SimHei / FangSong
plt.rcParams['font.size'] = 12  # default font size
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when using CJK fonts


eplison = 0.1  # NOTE(review): epsilon for epsilon-greedy (name is a typo); only referenced by commented-out code below
criterion = nn.CrossEntropyLoss()  # used by Actor_learn: criterion(prob, a) yields -log pi(a|s)
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"  # tolerate duplicate OpenMP runtimes (avoids a libiomp init crash on some setups)
gamma = 0.9  # discount factor for TD targets
actor_lr = 0.001  # learning rate for the actor (policy) optimizer
critic_lr = 0.01  # learning rate for the critic (value) optimizer
env = gym.make("CartPole-v0")
env.seed(1)     # reproducible, general Policy gradient has high variance
env = env.unwrapped  # remove the built-in time limit so episodes are not forcibly cut at step 500
batch_size = 1
epochs = 1000  # number of training episodes
copy_iter = 10  # episodes between target-critic synchronizations


class Share_layer(nn.Module):
    """Shared feature trunk: one 4->30 linear layer followed by ReLU.

    A single instance is shared by both the Actor and the Critic so the
    two networks learn a common state representation.
    """

    def __init__(self):
        super(Share_layer, self).__init__()
        self.linear1 = nn.Linear(4, 30)
        # Small-Gaussian weights with a constant positive bias.
        nn.init.normal_(self.linear1.weight, 0, 0.1)
        nn.init.constant_(self.linear1.bias, 0.1)

    def forward(self, out):
        """Map a (batch, 4) state tensor to (batch, 30) ReLU features."""
        return F.relu(self.linear1(out))



class Actor(nn.Module):
    """Policy network: shared trunk plus a 30->2 softmax head.

    The forward pass accepts a numpy state batch and returns action
    probabilities used for sampling.
    """

    def __init__(self, sl):
        super(Actor, self).__init__()
        self.share_layer = sl
        self.linear2 = nn.Linear(30, 2)

    def forward(self, x):
        """Return (batch, 2) action probabilities for numpy input `x`."""
        features = self.share_layer(t.from_numpy(x).float())
        # Softmax over the action dimension: the output is a distribution
        # used to sample the next action.
        return F.softmax(self.linear2(features), dim = 1)


class Critic(nn.Module):
    """Value network: shared trunk plus a 30->1 linear head.

    The forward pass accepts a numpy state batch and returns the scalar
    state-value estimate per row.
    """

    def __init__(self, sl):
        super(Critic, self).__init__()
        self.share_layer = sl
        self.linear2 = nn.Linear(30, 1)

    def forward(self, x):
        """Return (batch, 1) state values for numpy input `x`."""
        features = self.share_layer(t.from_numpy(x).float())
        return self.linear2(features)

def choose_action(prob):
    """Sample an action from the policy's probability distribution.

    Parameters
    ----------
    prob : torch.Tensor of shape (1, 2)
        Softmax output of the Actor network for the current state.

    Returns
    -------
    int
        Action index (0 or 1) sampled according to ``prob``.
    """
    # Removed: unreachable epsilon-greedy code that followed the original
    # return statement (dead code).
    return np.random.choice(a = 2, p = prob[0].detach().numpy())

# Computes the actor's policy-gradient loss and takes one optimizer step.
def Actor_learn(optim, critic, target_critic, s, s_, a, r, prob):

    '''
    Compute the actor loss from the current state, reward and next state,
    then perform one gradient step on the actor's parameters.

    Parameters
    ----------
    optim : torch.optim.Optimizer
        Optimizer over the actor's parameters.
    critic : Critic() instance
        Estimates the value of the current state.
    target_critic : Critic() instance
        Frozen copy of the critic used to bootstrap the TD target.
    s : float array
        State at the current time step.
    s_ : float array
        State at the next time step.
    a : int scalar
        Action taken in state s.
    r : float scalar
        Reward received at the current time step.
    prob : torch.Tensor
        Actor's softmax output for state s, fed to the cross-entropy term.

    Returns
    -------
    The actor's loss (torch.Tensor).

    '''
    V_s = critic(s)
    V_s_ = target_critic(s_)
    # Cross-entropy of the taken action: criterion(prob, a) == -log pi(a|s).
    # NOTE(review): CrossEntropyLoss applies log-softmax internally, but
    # `prob` is already a softmax output — this is effectively a double
    # softmax; confirm whether raw logits were intended.
    a = t.tensor([a]).long()
    logp_a = criterion(prob, a)

    # One-step TD error used as the advantage estimate; .item() detaches it
    # so the actor's update sends no gradient into the critic/value head.
    l = r + gamma * V_s_.item() - V_s
    l = l.item()
    # loss = advantage * (-log pi(a|s)): descending this increases the
    # log-probability of actions with positive advantage.
    loss = l * logp_a
    # print('Actor loss is :', loss)
    optim.zero_grad()

    loss.backward()
    optim.step()

    return loss


def Critic_learn(optim, critic, target_critic, s, r, s_):

    '''
    Perform one TD(0) update of the critic network.

    Parameters
    ----------
    optim : torch.optim.Optimizer
        Optimizer over the critic's parameters.
    critic : Critic instance
        Value network being trained.
    target_critic : Critic instance
        Frozen copy used to bootstrap the TD target.
    s : float array
        State at the current time step.
    r : float scalar
        Reward received at the current time step.
    s_ : float array
        State at the next time step.

    Returns
    -------
    The critic's squared TD-error loss (torch.Tensor).
    '''

    # Bootstrapped target from the frozen network; .item() detaches it so
    # only the online critic receives gradients from this loss.
    td_target = r + gamma * target_critic(s_).item()
    loss = (td_target - critic(s)) ** 2
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss

def classify(env, actor):
    """Run one greedy evaluation episode and return its length in steps.

    The actor's higher-probability action is always chosen (no sampling),
    and the episode is capped at 500 steps, so the return value measures
    how long the pole stayed balanced.
    """
    obs = env.reset()
    finished = False
    steps = 0

    while not finished:
        steps += 1

        # env.render()
        batch = np.expand_dims(obs, axis=0)

        # Greedy action selection; no gradients needed during evaluation.
        with t.no_grad():
            prob = actor(batch)
        action = 1 if prob[0, 0] < prob[0, 1] else 0

        obs, reward, finished, info = env.step(action)
        if finished | (steps >= 500):
            finished = True
    return steps

def learn():
    """Train the shared-trunk actor-critic on CartPole, then save and plot results.

    Side effects: saves the actor's weights to 'ac.pt', writes per-episode
    metrics to 'ex_data.xlsx', and shows a matplotlib figure with three
    subplots (training reward, critic loss, actor loss).
    """

    # One-step temporal-difference learning: faster and more stable here
    # than Monte-Carlo returns; each method has its own strengths.
    sl = Share_layer()
    actor = Actor(sl)
    critic = Critic(sl)
    target_critic = copy.deepcopy(critic)

    actor.train()
    critic.train()

    # One optimizer per network (both also update the shared trunk `sl`).
    actor_optim = t.optim.Adam(actor.parameters(), lr = actor_lr)
    critic_optim = t.optim.Adam(critic.parameters(), lr = critic_lr)
    train_rewards = []
    test_rewards = []
    actor_losses = []
    critic_losses = []

    for i in range(epochs):

        state = env.reset()
        done = False
        sum_rewards_i = 0
        step = 0
        critic_loss = 0
        actor_loss = 0

        while not done:

            step += 1

            # env.render()
            state = np.expand_dims(state, axis = 0)  # add a batch dimension

            prob= actor(state)

            # Sample an action from the policy's distribution.
            action = choose_action(prob)

            state_, reward, done, info = env.step(action)
            if done | (step >= 500):
                reward = -20.0  # penalize termination (pole fell or 500-step cap)
                done = True
            sum_rewards_i += reward


            # Update the critic first, then the actor, on this transition.
            critic_loss += Critic_learn(critic_optim, critic, target_critic, state, reward, state_)
            actor_loss += Actor_learn(actor_optim, critic, target_critic, state, state_, action, reward, prob)
            state = state_
        classify_step = classify(env, actor)  # greedy evaluation episode length
        critic_loss = critic_loss.item() / step
        actor_loss = actor_loss.item() / step


        # Periodically sync the frozen target critic with the online critic.
        if (i+1) % copy_iter == 0:
            target_critic = copy.deepcopy(critic)
        train_rewards.append(sum_rewards_i)
        test_rewards.append(classify_step)
        critic_losses.append(critic_loss)
        actor_losses.append(actor_loss)
        print('----epoch is----', i, '----classify step nums is----', classify_step)

    # Save the trained actor's parameters.
    t.save(actor.state_dict(), 'ac.pt')

    # Save the experiment metrics to an Excel file.
    total_ex_data = [train_rewards,test_rewards,critic_losses,actor_losses]
    total_ex_data = np.transpose(np.array(total_ex_data))
    total_ex_data = pd.DataFrame(total_ex_data)
    total_ex_data.columns=['train_rewards','test_rewards','critic_losses','actor_losses']
    total_ex_data.to_excel('ex_data.xlsx')

    fig = plt.figure(figsize=(12, 15))
    ax = plt.subplot(3,1,1)
    ax.plot(train_rewards)
    ax.set_title("训练回报")
    ax = plt.subplot(3,1,2)
    ax.plot(critic_losses)
    ax.set_title("Critic层损失")
    ax = plt.subplot(3, 1, 3)
    ax.plot(actor_losses)
    ax.set_title("Actor层损失")

    plt.show()



if __name__ == "__main__":
    # Train the agent, then release the environment's resources.
    learn()
    env.close()
