import tensorflow as tf
import matplotlib.pyplot as plt
import AI_TF_DDPG.DDPG.NN as NN
from mpl_toolkits.mplot3d import axes3d
import AI_TF_DDPG.DDPG.IO_Tools as iot
import numpy as np
import time
import multiprocessing as mp

###########################  hyper parameters  ####################
MAX_EPISODES = 50
MAX_EP_STEPS = 200
LR_A = 0.001    # learning rate for actor
LR_C = 0.001    # learning rate for critic
GAMMA = 0.9     # reward discount
REPLACEMENT = [
    dict(name='soft', tau=0.01),
    dict(name='hard', rep_iter_a=600, rep_iter_c=500)
][1]            # target-network parameter replacement strategy (index 1 -> 'hard' is selected)
MEMORY_CAPACITY = 1000
BATCH_SIZE = 32
VAR=0.15 # initial exploration-noise scale (std of Gaussian noise added to actions)
Var_discount=0.9995  # per-learning-step decay factor applied to VAR
#############################  result recording  #####################
Record_Reward=[]  # per-episode total reward, appended in ddpg.main and plotted at the end
####################################################################

class ddpg():
    """DDPG trainer: wires an Actor/Critic pair (AI_TF_DDPG.DDPG.NN) to a
    project environment and runs the episode/training loop in ``main``.

    Relies on module-level hyperparameters (LR_A, LR_C, GAMMA, REPLACEMENT,
    MEMORY_CAPACITY, BATCH_SIZE, VAR, Var_discount) and appends each episode's
    total reward to the module-level ``Record_Reward`` list.
    """

    def __init__(self,action_dim,state_dim,action_bound,first_step_strategy,
                 RENDER = True,OUTPUT_GRAPH = True):
        """Store configuration only; the TF graph is built lazily in ``main``.

        action_dim: list like [2, 1] -- [continuous angle dims, one-hot attack dim]
        state_dim: dimensionality of the flattened observation vector
        action_bound: magnitude bound for the continuous action components
        first_step_strategy: parameters forwarded to env.FirstStep
        RENDER: draw the 3D scene each step and save episode figures
        OUTPUT_GRAPH: dump the TF graph for TensorBoard
        """
        self.render = RENDER
        self.output_graph = OUTPUT_GRAPH
        self.a_dim=action_dim    # [2,1]: action dims [2 continuous angles, 1 one-hot attack]
        self.s_dim=state_dim    # input (state) dimensionality
        self.a_bound=action_bound   # amplitude bound of the continuous actions
        self.fss=first_step_strategy    # parameters for env.FirstStep


    def fig_show(self,env,fig,show=False):
        """Redraw the environment into *fig* as a fresh 3D scene; no-op when show is False."""
        if show:
            plt.clf()  # clear the previous frame's image
            ax = fig.add_subplot(111, projection='3d')  # rebuild the 3D axes after clf
            env.render(fig, ax)

    def trans_action(self,a,var):
        """Add exploration noise (std *var*) to the raw actor output *a* and
        repackage it as ``[[fly_xy], [bit]]`` for env.step.

        Assumes *a* is indexable as a (1, 3) array -- TODO confirm against
        NN.Actor.choose_action. a[0][0:2] are continuous flight components
        clipped to +/- a_bound; a[0][2] is clipped into [0, 1] and thresholded
        into a binary "attack" bit. Mutates *a* in place.
        """
        # a = np.clip(np.random.normal(a, var), -self.a_bound, self.a_bound)  # (old) noise applied to whole vector at once
        a[0][0] = np.clip(np.random.normal(a[0][0], var), -self.a_bound, self.a_bound)
        a[0][1] = np.clip(np.random.normal(a[0][1], var), -self.a_bound, self.a_bound)
        a[0][2] = np.clip(np.random.normal(a[0][2], var), 0, 1)
        fly,bit,action=[],[],[]
        fly.append(a[0][0:2])
        # bit.append(np.random.choice(2,1,p=[1-a[0][2],a[0][2]]))  # (old) stochastic attack sampling
        bit_choose=[1] if a[0][2]>0.68 else [0]  # attack-decision threshold (0.68 is a tuned magic number)
        bit.append(bit_choose)
        action.append(fly)
        action.append(bit)
        return action

    def main(self,envm):
        """Build the DDPG graph and train against environment *envm* for
        MAX_EPISODES episodes; finally plots and saves the reward curve.

        *envm* must provide reset/move/FirstStep/step/render/excel_save
        (project environment -- interface inferred from call sites only).
        """
        # TF1-style placeholders for state, reward and next state
        with tf.name_scope('S'):
            S = tf.placeholder(tf.float32, shape=[None, self.s_dim], name='s')
        with tf.name_scope('R'):
            R = tf.placeholder(tf.float32, [None, 1], name='r')
        with tf.name_scope('S_'):
            S_ = tf.placeholder(tf.float32, shape=[None, self.s_dim], name='s_')

        # replay buffer; each row holds a flattened [s, a, r, s_] transition
        M=iot.Memory(MEMORY_CAPACITY,dims=2 * self.s_dim + self.a_dim[0]+self.a_dim[1] + 1)

        var=VAR   # exploration-noise std; decays once the buffer has filled

        with tf.Session() as sess:
            actor = NN.Actor(S, S_, sess, self.a_dim, self.a_bound, LR_A, REPLACEMENT)
            critic = NN.Critic(S, S_, R, sess, self.s_dim, self.a_dim, LR_C, GAMMA, REPLACEMENT, actor.a_eval,
                               actor.a_target)
            actor.add_grad_to_graph(critic.a_grads)

            sess.run(tf.global_variables_initializer())
            if self.output_graph:
                tf.summary.FileWriter("AI_WeDo/ddpglogs/", sess.graph)

            for i in range(MAX_EPISODES):

                if self.render:
                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')

                observation=envm.reset()
                envm.move()
                observation=envm.FirstStep(self.fss)
                ep_reward=0
                # NOTE(review): `done` is inverted w.r.t. the Gym convention --
                # env.step apparently returns True while the episode is STILL
                # running (see `while done` + `if not done: break` below). Confirm.
                done=True

                while done:
                    if self.render:
                        self.fig_show(envm,fig,self.render)
                    s=iot.preprocess(observation).get_input()
                    a=actor.choose_action(s)
                    action=self.trans_action(a,var)
                    #print(action)
                    observation,reward,done=envm.step(action)
                    s_=iot.preprocess(observation).get_input()
                    # NOTE(review): stores the nested-list `action`, not the flat
                    # noisy vector `a`; relies on iot.Memory flattening it -- verify.
                    M.store_transition(s,action,reward,s_)

                    # learn only after the replay buffer has wrapped once
                    if M.pointer>MEMORY_CAPACITY:
                        var*=Var_discount    # gradually reduce exploration
                        b_M = M.sample(BATCH_SIZE)
                        b_s = b_M[:, :self.s_dim]
                        b_a = b_M[:, self.s_dim: self.s_dim + self.a_dim[0]+self.a_dim[1]]
                        b_r = b_M[:, -self.s_dim - 1: -self.s_dim]
                        b_s_ = b_M[:, -self.s_dim:]

                        critic.learn(b_s, b_a, b_r, b_s_)
                        actor.learn(b_s)

                    s=s_  # NOTE(review): dead assignment -- s is recomputed at the top of the loop
                    ep_reward+=reward
                    #print(ep_reward)

                    if not done:
                        print('Episode:', i, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var,)
                        Record_Reward.append(int(ep_reward))
                        if ep_reward>1000:
                            self.render=True  # start rendering once the policy performs well
                        break

                if self.render:
                    plt.close()   # reclaim figure memory
                # NOTE(review): this savefig runs AFTER plt.close() above, so it
                # saves a fresh empty figure -- likely a bug; confirm intent.
                if self.render:
                    plt.savefig('Env/Records/figs/'+time.strftime("%m-%d-%H-%M", time.localtime())+'.png')
                    plt.close()
                envm.excel_save()


        # final reward curve over all recorded episodes
        x=[x+1 for x in range(len(Record_Reward))]
        plt.plot(x,Record_Reward,label='Reward',color='b',marker='o')
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.title('1V1')
        plt.legend()
        plt.savefig('Env/Records/Result_fig/R' + time.strftime("%m-%d-%H-%M", time.localtime()) + '.eps')
        plt.show()

