'''
Build an LSTM with TensorFlow 1.x (tf.contrib.rnn).
The LSTM state predictor is used in 'choose_action'.
'''

import numpy as np
import tensorflow as tf
import math
import os
# Pin training to GPU 1; must be set before any TF GPU initialization.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.6
# Grow GPU memory on demand instead of grabbing it all up front.
config.gpu_options.allow_growth=True

# Fix RNG seeds for reproducibility (TF 1.x API).
np.random.seed(1)
tf.set_random_seed(1)
MAX_EPISODES = 200
MAX_EP_STEPS = 200
LR_A = 0.0004  # learning rate for actor
LR_C = 0.0008  # learning rate for critic
LR_P = 0.001 # learning rate for predict
GAMMA = 0.98  # reward discount
TAU = 0.01  # soft replacement
MEMORY_CAPACITY = 10000
BATCH_SIZE = 64




class TD3(object):
    """Twin Delayed DDPG (TD3) agent with an LSTM state predictor.

    The 'predict' LSTM estimates the current state from a window of delayed
    (state, action, reward) tuples; the Actor acts on that predicted state to
    compensate for action delay.  Twin critics, clipped target-policy noise
    and delayed policy updates follow the standard TD3 recipe.
    """

    def __init__(self, a_dim, s_dim, a_bound, e_greedy=0.1,
                 delay = 5,# action delay in steps; 1 means no delay (normal)
                 replace_target_iter=100,  # steps between target-network parameter updates
                 memory_size=10000,  # number of transitions the memory can hold
                 batch_size=32,  # number of samples drawn from memory per learning step
                 e_greedy_decrement=None
                 ):
        # memory row width: last state + current state = s_dim*2 (40+40), a_dim is 8 (8 aircraft), 1 is the reward
        # one batch entry = [prev state(40), current state(40), last action(8), action before that(8), ..., last reward, earlier rewards, ...] for delay steps
        self.memory = np.zeros((MEMORY_CAPACITY, delay*2+1,s_dim*2 + 1+a_dim), dtype=np.float32)
        self.a_dim, self.s_dim, self.a_bound, self.delay= a_dim, s_dim, a_bound,delay
        # experience could instead be stored in a SumTree (prioritized replay; disabled)
        # self.memory = Memory(capacity=memory_size)
        self.pointer = 0
        self.pointer2 = 0
        self.pointer3 = 0
        self.pointer4 = 0
        self.update_cnt = 0  # number of learn() calls so far
        self.policy_target_update_interval = 3  # actor / target-net update frequency (TD3 delayed policy update)
        self.sess = tf.Session(config=config)
        # self.sess.run(tf.global_variables_initializer())
        # initialization must run AFTER graph construction (moved below),
        # otherwise freshly trained parameters would be re-initialized

        # timeline 0 1 2 3 4 (rightmost is newest)
        self.S = tf.placeholder(tf.float32, [None, s_dim], 's')   # 1 2 3 4 , [0,1,2..]
        self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')  # 0 1 2 3
        self.R = tf.placeholder(tf.float32, [None, 1], 'r')  # 0 1 2 3
        self.A = tf.placeholder(tf.float32, [None, a_dim], 'a')  # 0 1 2 3 , [0,1,2..]
        self.S_A_ = tf.placeholder(tf.float32, [None, delay*2+1, s_dim+a_dim+1],'S_A_')
        self.S_A_now = tf.placeholder(tf.float32, [None, delay , s_dim + a_dim + 1], 'S_A_now')
        self.S_A__list = tf.unstack(self.S_A_now, axis=1)  # unstack the LSTM input along the time-step axis

        # self.a_list = tf.placeholder(tf.float32, [None, delay], 'a_list') # action list, 8*5, one action sequence
        # self.a_list_ = tf.placeholder(tf.float32, [None, delay], 'a_list_')  # action list, 8*5
        # self.A_LIST = np.zeros((A_LIST_MAX, a_list_sim), dtype=np.float32)# at most A_LIST_MAX action lists (action pool)

        # importance-sampling weights (for prioritized replay; currently unused)
        self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')

        self.epsilon_min = e_greedy
        self.epsilon_decrement = e_greedy_decrement
        self.epsilon = 1 if e_greedy_decrement is not None else self.epsilon_min

        # build the LSTM prediction network
        with tf.variable_scope('predict'):
            # input is state + action + reward per time step
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(100, forget_bias=1.0)
            outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, self.S_A__list, dtype=tf.float32)  # tf.nn.dynamic_rnn
            output = outputs[-1]
            self.S_predict = tf.layers.dense(output, units=s_dim) # 1 2 3 4 , [0,1,2..]

        # # build the Actor network on the raw state (disabled variant)
        # with tf.variable_scope('Actor'):
        #     self.a = self._build_a(self.S, scope='eval', trainable=True)
        #     a_ = self._build_a(self.S_, scope='target', trainable=False)
        #     sample = tf.distributions.Normal(loc=0., scale=1.)
        #     noise = tf.clip_by_value(sample.sample(1) * 0.5, -1, 1)
        #     noise_a_ = a_ + noise
        # build the Actor network on top of the LSTM-predicted state
        with tf.variable_scope('Actor'):
            self.a = self._build_a(self.S_predict, scope='eval', trainable=True)
            a_ = self._build_a(self.S_, scope='target', trainable=False)
            sample = tf.distributions.Normal(loc=0., scale=1.)
            noise = tf.clip_by_value(sample.sample(1) * 0.5, -1, 1)  # clipped target-policy smoothing noise
            noise_a_ = a_ + noise
        with tf.variable_scope('Critic'):
            # assign self.a = a in memory when calculating q for td_error,
            # otherwise the self.a is from Actor when updating Actor
            q1 = self._build_c(self.S, self.a, scope='eval1', trainable=True)
            q1_ = self._build_c(self.S_, noise_a_, scope='target1', trainable=False)
            q2 = self._build_c(self.S, self.a, scope='eval2', trainable=True)
            q2_ = self._build_c(self.S_, noise_a_, scope='target2', trainable=False)



        # networks parameters
        self.p_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='predict')

        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval1')
        self.ct_params1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target1')
        self.ce_params2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval2')
        self.ct_params2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target2')

        # self.pe_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Predict/Predict_e')
        # self.pt_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Predict/Predict_t')

        # # target net replacement
        # self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
        #                      for t, e in zip(self.at_params + self.ct_params1 + self.ct_params2 + self.pe_params,
        #                                      self.ae_params + self.ce_params1 + self.ce_params2 + self.pt_params)]
        #
        # self.hard_replace = [tf.assign(t, e)
        #                      for t, e in zip(self.at_params + self.ct_params1 + self.ct_params2 + self.pe_params,
        #                                      self.ae_params + self.ce_params1 + self.ce_params2 + self.pt_params)]
        # target net replacement
        self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
                             for t, e in zip( self.ct_params1 + self.ct_params2 + self.at_params,
                                              self.ce_params1 + self.ce_params2 + self.ae_params)]

        self.hard_replace = [tf.assign(t, e)
                             for t, e in zip( self.ct_params1 + self.ct_params2 + self.at_params,
                                              self.ce_params1 + self.ce_params2 + self.ae_params)]

        # NOTE(review): original author questioned whether the '+' here is reasonable
        q_target = self.R + GAMMA * tf.minimum(q1_, q2_)
        # in the feed_dic for the td_error, the self.a should change to actions in memory
        # predictor regression target: the state slice of the newest (index delay*2) time step
        self.loss_predict = tf.reduce_mean(tf.square(self.S_predict - self.S_A_[:, delay*2, :self.s_dim]))
        print(1111111111111111111111111111111111111111111111111)
        print(self.loss_predict )
        self._train_predict = tf.train.AdamOptimizer(learning_rate=LR_P).minimize(self.loss_predict,
                                                                                  var_list=self.p_params)
        self.a_loss = - tf.reduce_mean(q1)  # maximize the q
        self.atrain = tf.train.AdamOptimizer(LR_A).minimize(self.a_loss, var_list=self.ae_params)

        self.td_error1= tf.losses.mean_squared_error(labels=q_target, predictions=q1)
        self.ctrain1 = tf.train.AdamOptimizer(LR_C).minimize(self.td_error1, var_list=self.ce_params1)
        self.td_error2 = tf.losses.mean_squared_error(labels=q_target, predictions=q2)
        self.ctrain2 = tf.train.AdamOptimizer(LR_C).minimize(self.td_error2, var_list=self.ce_params2)


        # a_loss = - tf.reduce_mean(q1)  # maximize the q
        # self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.pe_params)

        self.sess.run(tf.global_variables_initializer())
        self.sess.run(self.hard_replace)  # initialize target networks with eval-network weights

        self.saver = tf.train.Saver()  # checkpoint saver

    def choose_action(self, observation,s_, s, a_list, r_list ):
        """Epsilon-greedy action selection.

        With probability (1 - epsilon) the action comes from the actor fed by
        the LSTM state predictor; otherwise each of the a_dim components is
        sampled uniformly from [-pi, pi).
        """
        # assumes the inputs stack to (delay*2+1, s_dim*2+a_dim+1), matching a
        # memory row — TODO confirm against callers
        transition = np.hstack((s_, s, a_list, r_list))
        transition = np.expand_dims(transition, axis=0)
        # strip the oldest-state columns; remaining per-step width matches the
        # s_dim+a_dim+1 layout of the S_A_now placeholder
        S_A_ = transition[:,:, self.s_dim: ]
        actions_value = np.zeros((self.a_dim), dtype=np.float32)
        # np.random.uniform: default bounds are low=0, high=1
        # print("observation:", observation)
        if np.random.uniform() > self.epsilon:
            temp_obs = observation
            actions_value = self.sess.run(self.a, feed_dict={self.S_A_now:S_A_[:,:self.delay,:],self.S: temp_obs})[0]
            # [0] drops the batch dimension
            print("--------------------神经网络决策结果actions_value:-----------", actions_value)

        else:
            for i in range(self.a_dim):
                actions_value[i] = np.random.uniform(-math.pi, math.pi)
            print("随机决策结果", actions_value)
        # self.store_a_list(actions_value)
        return actions_value

    def learn(self,num_lun,step):
        """Sample a batch and train predictor, both critics and (delayed) actor.

        num_lun (episode index) and step are used only for loss logging into
        loss/loss.txt and loss/lossa.txt.
        """
        print("start learn---")
        # soft target replacement
        self.update_cnt += 1

        # NOTE(review): assumes at least one transition is stored (pointer > 0)
        indices = np.random.choice(min(self.pointer,MEMORY_CAPACITY), size=BATCH_SIZE)
        BATCH = self.memory[indices,:, :]
        batch = BATCH[:,0,:]
        # NOTE: naming conflict with the original code: here S_ holds the past
        # state and S the current one, while in the stored data s is the past
        # state and s_ the current one; fixing it everywhere was tedious, so
        # the roles are simply swapped below.
        bs_ = batch[:, :self.s_dim] # current state
        bs = batch[:, self.s_dim: self.s_dim *2] # previous state
        ba = batch[:, -self.a_dim-1: -1] # previous action
        br = batch[:, -1:] # previous reward
        #print(br.shape)
        S_A_ = BATCH[:,:, self.s_dim: ]

        self.pointer2 += 1
        if self.pointer2 % 1 == 0:  # '% 1 == 0' is always true — logging-interval placeholder
            lossc1 = self.sess.run(self.td_error1,{self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
            lossc2 = self.sess.run(self.td_error2,{self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
            lossp = self.sess.run(self.loss_predict, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_, self.S: bs_})
            with open('loss/loss.txt', 'a') as f:
                str1 = "第%d局 第%d步： lossc1: %f  lossc2: %f  losscp: %f \n" % (num_lun, step, lossc1, lossc2,lossp)
                f.write(str1)
        self.sess.run(self.ctrain1, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
        self.sess.run(self.ctrain2, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
        self.sess.run(self._train_predict, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_, self.S: bs_})
        print(2111111111111111111111111111111111111111111111111)

        # delayed policy update: train the actor and soft-update targets only
        # every policy_target_update_interval learn() calls (TD3)
        if self.update_cnt % self.policy_target_update_interval == 0:
            lossa = self.sess.run( self.a_loss, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
            self.sess.run(self.atrain, {self.S_A_now:S_A_[:,:self.delay,:],self.S_A_: S_A_,self.S: bs_, self.a: ba, self.R: br, self.S_: bs})
            self.sess.run(self.soft_replace)
            with open('loss/lossa.txt', 'a') as f:
                str1 = "第%d局 第%d步： lossa: %f \n" % (num_lun, step,lossa)
                f.write(str1)
        # anneal exploration epsilon down to epsilon_min
        self.epsilon = self.epsilon - self.epsilon_decrement if self.epsilon > self.epsilon_min else self.epsilon_min


        # self.learn_step_counter += 1  # update once every 100 steps
        # print("epsilon: ", self.epsilon)
        # record results

    def store_transition(self,s_, s, a_list, r_list ):
        """Store one transition window into the ring-buffer replay memory."""
        # a_list includes [... action two steps ago, last action]
        transition = np.hstack((s_, s, a_list, r_list))
        # horizontal concatenation
        # self.memory.store(transition)
        index = self.pointer % MEMORY_CAPACITY  # replace the old memory with new memory
        self.memory[index, :,:] = transition
        self.pointer += 1
        self.pointer3 += 1
        if self.pointer3 % 100 == 0:
            print("transition: ", transition)

    # input is one action, dim 8
    def store_a_list(self,a):
        """Shift action *a* into the rolling action list.

        NOTE(review): appears to be dead code — self.a_list / self.a_list_ /
        self.A_LIST are never created on the instance (only commented-out
        placeholders in __init__), so calling this would raise AttributeError.
        The only call site in choose_action is commented out.
        """
        # a is 8-dimensional
        # a_value = np.hstack(a)
        # print(a.type())
        print(self.a_list.type())
        a_list = self.a_list
        self.a_list[0:self.a_dim].assign(a)
        self.a_list[self.a_dim:] .assign( self.a_list_[0:-self.a_dim] )
        self.a_list_ = a_list
        print('self.a_list_',self.a_list_)
        print('self.a_list',self.a_list)
        if self.pointer4<self.delay*2+1:
            self.A_LIST[self.pointer4, :] = self.a_list
            self.pointer4 += 1
        else:
            self.A_LIST[0,:]=self.a_list
            for i in range(1,self.delay*2):
                self.A_LIST[i+1,:]=self.A_LIST[i,:]


    def _build_a(self, s, scope, trainable):
        """Build a two-layer actor MLP: state s -> action in [-a_bound, a_bound].

        Hidden layer of 64 units with leaky-ReLU, tanh output scaled by a_bound.
        """
        # trainable = True if reuse is None else False
        with tf.variable_scope(scope):
            c_names, n_l1, w_initializer, b_initializer = \
                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 64, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)

            w1 = tf.get_variable('w1', [self.s_dim, n_l1], initializer=w_initializer,collections=c_names, trainable=trainable)

            b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer,collections=c_names, trainable=trainable)


            l1 = tf.matmul(s, w1) + b1
            l1 = tf.nn.leaky_relu(l1)

            w2 = tf.get_variable('w2', [n_l1, self.a_dim], initializer=w_initializer,collections=c_names, trainable=trainable)

            b2 = tf.get_variable('b2', [1, self.a_dim], initializer=b_initializer,collections=c_names, trainable=trainable)

            out = tf.matmul(l1, w2) + b2

            o = tf.nn.tanh(out)
            return tf.multiply(o, self.a_bound, name='scaled_a')

    def _build_c(self, s, a, scope, trainable):
        """Build a critic network Q(s, a) -> scalar value per batch row.

        State and action each get their own first-layer weights, summed before
        the ReLU; a dense layer maps the 30 hidden units to the Q value.
        """
        print("s:", s.shape)
        # trainable = True if reuse is None else False
        with tf.variable_scope(scope):
            n_l1 = 30
            # create new tensorflow variables, sized [s_dim, n_l1] etc.
            w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
            print("w1_s:", w1_s.shape)
            w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
            print("w1_a:", w1_a.shape)
            b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
            print("b1:", b1.shape)
            # matmul: matrix multiplication
            net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
            print("net:", net.shape)
            # net:(?,30)
            ret = tf.layers.dense(net, 1, trainable=trainable)  # Q(s,a)
            print("return:", ret.shape)
            # ret:(?,1)
            return ret

# input: an action sequence (8*5) plus the current state (40); output: current action (8)
    def _build_p(self, s, a_list, scope, trainable):
        """Build the (alternative) MLP predictor head.

        NOTE(review): not referenced anywhere in this file — the LSTM in
        __init__ is used instead.
        """
        # trainable = True if reuse is None else False
        with tf.variable_scope(scope):
            c_names, n_l1, w_initializer, b_initializer = \
                ['predict_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 64, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)

            w1_1 = tf.get_variable('w1_1', [self.s_dim, n_l1], initializer=w_initializer, collections=c_names, trainable=trainable)

            b1_1 = tf.get_variable('b1_1', [1, n_l1], initializer=b_initializer, collections=c_names, trainable=trainable)

            w1_2 = tf.get_variable('w1_2', [self.s_dim, n_l1], initializer=w_initializer, collections=c_names, trainable=trainable)

            b1_2 = tf.get_variable('b1_2', [1, n_l1], initializer=b_initializer, collections=c_names, trainable=trainable)

            l1 = tf.matmul(s, w1_1) + b1_1+tf.matmul(a_list, w1_2) + b1_2
            l1 = tf.nn.leaky_relu(l1)

            w2 = tf.get_variable('w2', [n_l1, self.a_dim], initializer=w_initializer, collections=c_names, trainable=trainable)

            b2 = tf.get_variable('b2', [1, self.a_dim], initializer=b_initializer, collections=c_names, trainable=trainable)

            out = tf.matmul(l1, w2) + b2

            o = tf.nn.tanh(out)

            return tf.multiply(o, self.a_bound, name='scaled_p')

    def save_model(self, ver, id):  # save model checkpoint
        save_path = self.saver.save(self.sess, "./training/red_rl-%d-%d.ckpt" % (ver, id))

    def load_model(self, ver, id):
        # restore model checkpoint saved by save_model
        self.saver.restore(self.sess, "./training/red_rl-%d-%d.ckpt" % (ver, id))
