import tensorflow as tf
import matplotlib.pyplot as plt
import AI_WeDo.DDPG.NN as NN

###########################  hyper parameters  ####################
MAX_EPISODES = 200        # number of training episodes
MAX_EP_STEPS = 200        # max environment steps per episode
LR_A = 0.001              # learning rate for actor
LR_C = 0.001              # learning rate for critic
GAMMA = 0.9               # reward discount factor

# Target-network replacement strategies; the index below picks the active one.
# 'soft' blends target weights each step; 'hard' copies them every N steps.
_REPLACEMENT_STRATEGIES = [
    dict(name='soft', tau=0.01),
    dict(name='hard', rep_iter_a=600, rep_iter_c=500),
]
REPLACEMENT = _REPLACEMENT_STRATEGIES[1]   # currently: 'hard' replacement

MEMORY_CAPACITY = 10000   # replay-buffer capacity
BATCH_SIZE = 32           # minibatch size sampled from replay memory
####################################################################

class ddpg:
    """Training driver for a DDPG (Deep Deterministic Policy Gradient) agent.

    Builds the TF-1.x placeholder graph, constructs the actor network from
    the project-local ``NN`` module, and steps the environment for a fixed
    number of episodes.

    NOTE(review): this code targets TensorFlow 1.x (``tf.placeholder`` /
    ``tf.Session``); under TF 2.x it would need ``tf.compat.v1`` — confirm
    the pinned TF version.
    """

    def __init__(self, action_dim, state_dim, action_bound, RENDER=False, OUTPUT_GRAPH=True):
        """Store agent configuration.

        Args:
            action_dim: action dimensionality (per the original note, e.g.
                [2, 1] — a 2-D continuous angle plus a 1-D one-hot attack).
            state_dim: dimensionality of the observation vector fed to ``S``.
            action_bound: magnitude bound of the continuous action.
            RENDER: whether to render during training (stored, not yet used).
            OUTPUT_GRAPH: whether to dump the TF graph (stored, not yet used).
        """
        self.render = RENDER
        self.output_graph = OUTPUT_GRAPH
        self.a_dim = action_dim      # action dimensionality
        self.s_dim = state_dim       # observation dimensionality
        self.a_bound = action_bound  # continuous-action magnitude bound

    def fig_show(self, env, fig, show=False):
        """Redraw ``env`` onto ``fig`` when ``show`` is True; otherwise no-op."""
        if show:
            plt.clf()  # clear the previous episode's image
            ax = fig.add_subplot(111, projection='3d')  # rebuild the 3-D axes
            env.render(fig, ax)

    def main(self, envm):
        """Build the graph and run the episode/step training loop on ``envm``.

        Args:
            envm: environment object exposing ``reset()``, ``FirstStep()``
                and ``Step()`` (project-defined; contract inferred from use).
        """
        with tf.name_scope('S'):
            S = tf.placeholder(tf.float32, shape=[None, self.s_dim], name='s')
        with tf.name_scope('R'):
            # Reward placeholder; not consumed yet (critic construction is
            # presumably still to come — only the actor is built below).
            R = tf.placeholder(tf.float32, [None, 1], name='r')
        with tf.name_scope('S_'):
            S_ = tf.placeholder(tf.float32, shape=[None, self.s_dim], name='s_')

        sess = tf.Session()
        try:
            actor = NN.Actor(S, S_, sess, self.a_dim, self.a_bound, LR_A, REPLACEMENT)

            # Figure kept for potential rendering via fig_show(); the unused
            # 3-D subplot that was also created here has been removed.
            fig = plt.figure()

            for episode in range(MAX_EPISODES):
                envm.reset()
                envm.FirstStep()
                for step in range(MAX_EP_STEPS):
                    envm.Step()
        finally:
            # Fix: the session was previously never released.
            sess.close()


