# -*- coding: utf-8 -*-

import tensorflow as tf
import numpy as np


class DDPG(object):
	"""Deep Deterministic Policy Gradient agent (TensorFlow 1.x graph mode).

	Builds an actor ("动作") and a critic ("价值") network, each with an
	'eval' copy and a target ('目标') copy, plus the soft target-update,
	critic TD-error and actor policy-gradient training ops.

	Args (names kept for caller compatibility):
		in维度: state dimension (size of the observation vector).
		out维度: action dimension (size of the action vector).
		值范围: action bound; the tanh output is scaled by this value.
	"""

	# Hyperparameters. These were referenced as free (undefined) names in the
	# original code; defining them as class attributes fixes the NameErrors
	# while keeping them easy to override per-instance.
	TAU = 0.01              # soft target-update mixing rate
	GAMMA = 0.9             # reward discount factor
	LR_A = 0.001            # actor learning rate
	LR_C = 0.002            # critic learning rate
	MEMORY_CAPACITY = 10000  # replay-buffer rows

	m_维度 = 0        # action dimension (set in __init__)
	m_值范围 = [0, 1]  # action bound (set in __init__)

	def __init__(self, in维度, out维度, 值范围):
		self.in维度 = in维度
		# Was never assigned before, so the actor's output layer would have
		# used the class default 0 units.
		self.m_维度 = out维度
		self.m_值范围 = 值范围

		# Replay buffer: one row holds [s, s_, a, r].
		# (Original called np.zeros() with no shape — TypeError.)
		self.记忆库 = np.zeros(
			(self.MEMORY_CAPACITY, in维度 * 2 + out维度 + 1), dtype=np.float32)
		self.记忆库指针 = 0

		self.sess = tf.Session()

		self.m_a当前状态 = tf.placeholder(tf.float32, [None, in维度], 's')
		self.m_a下一个状态 = tf.placeholder(tf.float32, [None, in维度], 's_')
		# 'Nune' typo fixed -> None.
		self.m_s奖励 = tf.placeholder(tf.float32, [None, 1], 'r')

		with tf.variable_scope("动作"):
			# 'traninable' keyword typos fixed; target network must be
			# non-trainable (it is updated only via the soft replacement).
			self.m_a当前状态net = self._build_NET_a(
				self.m_a当前状态, scope='eval', trainable=True)
			self.m_a下一个状态net = self._build_NET_a(
				self.m_a下一个状态, scope='目标', trainable=False)
		with tf.variable_scope("价值"):
			# Argument order fixed: the builder signature is (s, a, ...) but
			# the original passed the actor output as s and the state as a.
			q = self._build_NET_c(
				self.m_a当前状态, self.m_a当前状态net, scope='eval', trainable=True)
			q_ = self._build_NET_c(
				self.m_a下一个状态, self.m_a下一个状态net, scope='目标', trainable=False)

		# Collections now resolve correctly because the builders open their
		# own sub-scope (they previously ignored the scope argument).
		self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='动作/eval')
		self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='动作/目标')
		self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='价值/eval')
		self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='价值/目标')

		# Soft replacement: target <- (1 - TAU) * target + TAU * eval.
		self.soft_replace = [
			[tf.assign(ta, (1 - self.TAU) * ta + self.TAU * ea),
			 tf.assign(tc, (1 - self.TAU) * tc + self.TAU * ec)]
			for ta, ea, tc, ec in zip(self.at_params, self.ae_params,
			                          self.ct_params, self.ce_params)]

		# Bellman target uses the reward placeholder (self.R was undefined).
		q_target = self.m_s奖励 + self.GAMMA * q_

		td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
		self.ctrain = tf.train.AdamOptimizer(self.LR_C).minimize(
			td_error, var_list=self.ce_params)

		# Maximize Q(s, pi(s)) => minimize its negation over actor weights only.
		a_loss = -tf.reduce_mean(q)
		self.atrain = tf.train.AdamOptimizer(self.LR_A).minimize(
			a_loss, var_list=self.ae_params)

		self.sess.run(tf.global_variables_initializer())

	def _build_NET_a(self, s, scope, trainable):
		"""Build the actor: state -> bounded action, under variable scope `scope`."""
		with tf.variable_scope(scope):
			net = tf.layers.dense(s, 100, activation=tf.nn.relu,
			                      name='l1', trainable=trainable)
			# Stray extra `self` argument removed from the original call.
			a = tf.layers.dense(net, self.m_维度, activation=tf.nn.tanh,
			                    name='a', trainable=trainable)
			# tanh is in [-1, 1]; scale to the action bound.
			return tf.multiply(a, self.m_值范围, name='scaled')

	def _build_NET_c(self, s, a, scope, trainable):
		"""Build the critic: (state, action) -> scalar Q-value, under `scope`.

		The original returned tf.multiply(a, self.a_bound) with an undefined
		attribute (copy-paste from the actor) and never built a network.
		"""
		with tf.variable_scope(scope):
			n_l1 = 100
			# w1_s maps the state (in维度), w1_a maps the action (m_维度);
			# the original used m_维度 for both shapes.
			w1_s = tf.get_variable('w1_s', [self.in维度, n_l1], trainable=trainable)
			w1_a = tf.get_variable('w1_a', [self.m_维度, n_l1], trainable=trainable)
			b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
			net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
			return tf.layers.dense(net, 1, trainable=trainable)  # Q(s, a)

	def choose_action(self, s):
		"""Return the deterministic action for a single state vector `s`.

		The original called sess.run() with no fetches and returned nothing.
		"""
		return self.sess.run(self.m_a当前状态net,
		                     {self.m_a当前状态: s[np.newaxis, :]})[0]



