import sys
import gym
from tqdm import tqdm
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
import os
from Replay_Buffer import ReplayBuffer
from CartPole_v1_Step import CartPoleStep

# NOTE(review): workaround for the "OMP: Error #15" duplicate-OpenMP-runtime crash,
# presumably caused by torch and matplotlib each bundling libiomp (common on Windows/conda).
# Intel documents this flag as unsupported and potentially unsafe — confirm it is still needed.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# 定义神经网络
# Q-network: maps a concatenated (state, action) vector to a scalar Q-value.
class Net(nn.Module):
    """MLP Q-value approximator.

    Input: a float tensor of shape (batch, 5) — a 4-dim CartPole state
    concatenated with a 1-dim action.
    Output: a float tensor of shape (batch, 1) — the predicted Q-value.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Layer names (fc1/fc2/fc3) are kept stable so saved state_dicts
        # ('dqn.pt') remain loadable.
        self.fc1 = nn.Linear(5, 30)
        self.fc2 = nn.Linear(30, 4)
        self.fc3 = nn.Linear(4, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # exp keeps the output strictly positive (range (0, +inf), NOT 0-1);
        # the original comment claiming a 0-1 bound was incorrect. Note exp
        # can overflow for large pre-activations.
        x = torch.exp(self.fc3(x))
        return x

    def num_float_features(self, x):
        """Return the number of features per sample, i.e. the product of all
        dimensions of *x* except the leading (batch) dimension.
        """
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


def DQN():
    """Train a DQN agent on CartPole-v1.

    Fills a replay buffer with random-policy transitions, then trains a
    prediction network against a periodically-copied target network, using
    a learned environment model (CartPoleStep) to generate new transitions
    during training. Saves weights to 'dqn.pt' and plots the loss curve.
    """
    # Real environment (for seeding the buffer) and batched model environment
    # (for generating transitions during training).
    env = gym.make("CartPole-v1")
    envmodel = CartPoleStep()

    # Training hyper-parameters.
    conf = {'gamma': 0.9,       # discount factor
            'epoches': 15000,   # number of gradient steps
            'copyiter': 50}     # target-network sync interval

    # Replay buffer: its depth affects breadth of exploration; the batch size
    # affects the depth of each update.
    config = {'replay_buffer_size': 15000, 'batch_size': 512}
    pool = ReplayBuffer(config)

    # Seed the buffer with transitions from a random policy.
    observation = env.reset()
    for i in range(int(pool.buffer_size)):
        action = env.action_space.sample()  # random action
        next_observation, reward, done, info = env.step(action)

        if done:
            reward = -20  # large penalty on episode termination
            pool.add(observation, action, reward, next_observation, done)
            observation = env.reset()
        else:
            pool.add(observation, action, reward, next_observation, done)
            # FIX: advance the current state; the original never updated
            # `observation` here, so stored (s, a, r, s') tuples paired a
            # stale state with the env's actual successor.
            observation = next_observation

    pred_net = Net()
    target_net = copy.deepcopy(pred_net)  # target net starts as a copy of the prediction net
    print(pred_net)
    for name, parameters in pred_net.named_parameters():
        print(name, ':', parameters.size())

    # FIX: create the optimizer ONCE. The original re-created Adam inside the
    # training loop, resetting its moment estimates on every single step.
    optimizer = torch.optim.Adam(pred_net.parameters(), weight_decay=pow(10, -6))

    # Training loop.
    train_bar = tqdm(range(conf['epoches']))
    loss_sequence = []
    for epoch in train_bar:
        # batch layout assumed: (states, actions, rewards, next_states, dones)
        # — TODO confirm against ReplayBuffer.sample().
        batch = pool.sample()
        batch_X = np.concatenate((batch[0], batch[1]), axis=1)  # concat s and a as network input

        # Compute max_a' Q_target(s', a') over the discrete action set {0, 1},
        # tracking which action achieved the max (greedy action).
        target_Y = np.ones((pool.batch_size, 1)) * (-sys.maxsize)
        chosed_action = np.zeros((pool.batch_size, 1))
        for a in range(2):
            actions_batch = np.ones((pool.batch_size, 1)) * a
            batch_nextX = np.concatenate((batch[3], actions_batch), axis=1)
            with torch.no_grad():
                batch_nextX = torch.tensor(batch_nextX, dtype=torch.float32)
                tmp_Y = target_net(batch_nextX)
                tmp_Y = np.array(tmp_Y)
                chosed_action = (tmp_Y > target_Y) * a + (tmp_Y <= target_Y) * chosed_action
                target_Y = np.maximum(target_Y, tmp_Y)  # running max over actions

        if any(batch[4]):  # only pay for the terminal-state handling when the batch has terminals
            # Q = r + gamma * max_a' Q(s', a') for non-terminal s'; Q = r when terminal.
            flag_vector = ~batch[4] + 0  # invert done flags -> multiplicative mask (1 = non-terminal)
            flag_matrix = np.tile((flag_vector).reshape((pool.batch_size, 1)), batch[3].shape[1])
            target_Y = flag_vector * target_Y * conf['gamma'] + batch[2]
            # Refill terminal next-states with random states in [-0.05, 0.05]
            # (CartPole's reset range), with a random action, so the model env
            # can keep rolling forward from them.
            batch[3] = flag_matrix * batch[3] + (~np.array(flag_matrix, dtype=bool) + 0) * (np.random.rand(batch[3].shape[0], batch[3].shape[1]) * 0.1 - 0.05)
            chosed_action = flag_vector * chosed_action + (~np.array(flag_vector, dtype=bool) + 0) * (np.random.randint(0, 2, size=batch[2].shape))
        else:
            target_Y = conf['gamma'] * target_Y + batch[2]

        # Roll the batch forward through the model environment with the greedy
        # actions and push the generated transitions back into the buffer.
        observation, reward, done, info = envmodel.step(batch[3], chosed_action)
        pool.add(batch[3], chosed_action, reward, observation, done)

        # Prediction. Inputs do not need requires_grad; gradients flow to the
        # network parameters regardless.
        batch_X = torch.tensor(batch_X, dtype=torch.float32)
        pred_Y = pred_net(batch_X)

        # MSE between predicted and bootstrapped target Q-values.
        target_Y = torch.tensor(target_Y, dtype=torch.float32)
        loss = F.mse_loss(pred_Y, target_Y)

        # Gradient step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_sequence.append(loss.item())

        # Periodically sync the target network with the prediction network.
        if ((epoch + 1) % conf['copyiter']) == 0:
            target_net = copy.deepcopy(pred_net)

        train_bar.set_postfix(loss=loss.item())

    # Persist trained weights.
    torch.save(pred_net.state_dict(), 'dqn.pt')

    # Plot the training loss curve.
    plt.plot(loss_sequence)
    plt.title('loss')
    plt.show()

# Guard the entry point so importing this module does not trigger a full
# training run; executing it as a script behaves exactly as before.
if __name__ == "__main__":
    DQN()