#coding=utf-8
import random
import parameters as pms
import numpy as np
import tensorflow as tf
import os
from storage import Storage
from ops import linear
from base.net.net import *
from tqdm import tqdm
class DQNAgent(object):
    """Deep Q-Network agent built on the TensorFlow pre-1.0 graph API.

    Owns two networks: an online ("train") network used for learning and a
    target network used to compute bootstrap targets. Experience is stored in
    a replay buffer (``Storage``) and sampled in minibatches; the online
    weights are periodically copied into the target network via
    ``updateTarget``. Hyperparameters come from the ``parameters`` module
    (imported as ``pms``).
    """
    def __init__(self,env,sess,model=None,render=False):
        """Initialize the agent and build the TF graph.

        Args:
            env: gym-style environment exposing ``action_space``, ``reset``
                and ``step``.
            sess: externally created TensorFlow session used for all ops.
            model: optional checkpoint file name passed to ``load_model``;
                ``None`` restores the latest checkpoint if one exists.
            render: if True, render the environment while training.
        """
        self.sess=sess
        self.env=env
        self.action_space=env.action_space #set action_space
        # Count of train() calls; drives LR decay, checkpointing and logging.
        self.trainNumber=0
        self.render=render
        # Persistent global-step variable so training can resume after a
        # checkpoint restore (read back via getStartStep()).
        with tf.variable_scope('step'):
            self.step_op = tf.Variable(0, trainable=False, name='step')
            self.step_input = tf.placeholder('int32', None, name='step_input')
            self.step_assign_op = self.step_op.assign(self.step_input)
        self.build_net(model)
    def build_net(self,model):
        """Build online/target networks, the loss/optimizer, and summaries.

        Also initializes variables, creates the summary writer (training mode
        only), restores a checkpoint if available, and syncs the target net.
        """
        # Online (training) network
        self.s_t=tf.placeholder(dtype='float32',shape=[None,pms.stateSize],name='s_t')
        self.targetValue = tf.placeholder(dtype='float32', shape=[None], name='target_value')
        # One-hot mask over actions; selects the Q-value of the taken action.
        self.actionChoose = tf.placeholder(dtype='float32', shape=[None,self.env.action_space.n], name='actionChoose')
        self.trainNet=TrainNet({'data':self.s_t})
        self.q=self.trainNet.layers['q']
        # Target network (separate weights, synced by updateTarget())
        self.target_s_t=tf.placeholder(dtype='float32',shape=[None,pms.stateSize],name='target_s_t')
        self.targetNet=TargetNet({'data':self.target_s_t})
        self.q_t=self.targetNet.layers['q_t']
        # Loss and optimizer
        self.q_acted = tf.reduce_sum(self.q * self.actionChoose, reduction_indices=1, name='q_acted')  # Q-value of the action actually taken
        self.delta = self.targetValue - self.q_acted
        self.loss = tf.reduce_mean(0.5*tf.square(self.delta), name='loss')  # mean squared TD error
        self.learning_rate_step = tf.placeholder(dtype='int64', shape=None, name='learning_rate_step')
        # Exponentially decayed learning rate, floored at learning_rate_minimum.
        self.learning_rate_op = tf.maximum(pms.learning_rate_minimum,
                                           tf.train.exponential_decay(
                                               pms.learning_rate,
                                               self.learning_rate_step,
                                               pms.learning_rate_decay_step,
                                               pms.learning_rate_decay,
                                               staircase=True))
        self.optim = tf.train.GradientDescentOptimizer(
            self.learning_rate_op).minimize(self.loss)  # optimization target
        # Summaries for monitoring training
        # summary: loss
        self.lossInput=tf.placeholder(dtype='float32',shape=None,name="lossInput")
        self.lossSummaryOp=tf.scalar_summary('loss',self.lossInput)
        # summary: steps survived per episode
        self.success_steps_episode_placeholder=tf.placeholder(dtype="int32",shape=None,name="success_steps_episode")
        self.success_steps_episode_summary_op=tf.scalar_summary("success_steps_episode",self.success_steps_episode_placeholder)
        # Initialize variables and session-level helpers
        self.sess.run(tf.initialize_all_variables())
        # NOTE(review): self.writer is only created when pms.train_flag is
        # truthy, but startTrain()/train() use it unconditionally — calling
        # them with train_flag False raises AttributeError. Confirm intent.
        if(pms.train_flag):
            self.writer = tf.train.SummaryWriter('./logs/%s' % pms.logName, self.sess.graph)
        self.saver = tf.train.Saver(max_to_keep=30)
        self.load_model(model)
        self.updateTarget()
    def act(self,state,eGreedy):
        """Select an action with a three-way epsilon-greedy-like policy.

        With probability (eGreedy-0.3) a random action is sampled; with
        probability 0.1 action 2 is forced (environment-specific choice —
        presumably a meaningful default action for this env, verify against
        the environment); otherwise the greedy action is taken.
        """
        random_number = random.uniform(0, 1)
        if random_number < (eGreedy-0.3):
            return self.action_space.sample()
        elif random_number>=(eGreedy-0.3) and random_number<(eGreedy-0.2):
            return 2
        else:
            return self.actTest(state)
            # return 2
    def actTest(self,state):
        """Return the greedy action for ``state`` (used for evaluation).

        NOTE(review): this queries the *target* network (q_t), not the online
        network — unconventional for DQN action selection; confirm intent.
        """
        targetQ=self.sess.run(self.q_t,feed_dict={self.target_s_t:[state]})
        action=np.argmax(targetQ,axis=1)[0]
        return action
    def startTrain(self):
        """Main training loop: collect experience, then fit the network.

        Runs ``pms.max_episode`` outer iterations; within each, plays until
        the episode ends, stores transitions, then (once the buffer exceeds
        ``pms.startTrainSize``) runs several ``train()`` steps and syncs the
        target network.
        """
        self.train_iter_number=pms.train_iter_number
        self.storage = Storage(pms.storageMaxSize)  # allocate the replay buffer
        update_step = 0
        startStep = self.getStartStep()
        state_t = self.env.reset()
        self.success_steps_episode=0 # total steps taken in the current episode
        self.sum_reward_episode=0.0 # total reward accumulated in the current episode
        self.step=0
        for self.episode in tqdm(range(0, pms.max_episode)):
            if (self.render):
                self.env.render()
            # Linearly anneal exploration from epStart down toward epEnd
            # as episodes progress.
            eGreedy = pms.epEnd + max(0., (pms.epStart - pms.epEnd) * (
            pms.max_episode - max(0., self.episode)) / pms.max_episode)
            done=False
            while(done==False):
                actionNum = self.act(state_t, eGreedy)  # get action
                state_t_plus_1, reward, done, info = self.env.step(actionNum)  # do action
                self.sum_reward_episode += reward
                self.success_steps_episode += 1
                self.storage.saveTuple(state_t, actionNum, reward, done)
                state_t = state_t_plus_1
            if (done):
                # print 'reward is : %f'%reward
                state_t = self.env.reset()
                # average_reward_episode_temp=self.sum_reward_episode/self.success_steps_episode
                self.writer.add_summary(self.sess.run(self.success_steps_episode_summary_op,feed_dict={self.success_steps_episode_placeholder:self.success_steps_episode}),self.episode)
                print("总步数：%d 总回报：%f eGreedy:%f 场景数:%d" % (self.success_steps_episode,self.sum_reward_episode,eGreedy,self.episode))
                # Train at most as many iterations as the episode was long.
                self.train_iter_number = min(pms.train_iter_number,self.success_steps_episode)
                self.success_steps_episode = 0
                self.sum_reward_episode = 0.0
            # Start training once the buffer holds enough samples.
            overallSize = self.storage.getBufferSize()
            if overallSize > pms.startTrainSize:
                # print "trainning..."
                for j in range(self.train_iter_number):
                    self.train()
            self.updateTarget()


    def train(self):
        """Run one optimization step on a minibatch sampled from the buffer."""
        self.trainNumber+=1
        # Fetch a sampled minibatch from the replay buffer.
        self.s_t_data, self.action_data, self.s_t_plus_1_data, self.done_data,self.reward_data=self.storage.sample()
        # Compute the bootstrap target from the target network.
        targetQ=self.q_t.eval({self.target_s_t:self.s_t_plus_1_data})
        maxQ= np.max(targetQ, axis=1)
        maxQ=np.resize(maxQ,(pms.batchSize,1))
        # NOTE(review): standard DQN masks the bootstrap term with (1 - done);
        # here maxQ is multiplied by done_data directly. This is only correct
        # if Storage.sample() returns the flag already inverted (non-terminal
        # == 1) — verify against the Storage implementation.
        targetValue=self.done_data*maxQ * pms.discount  + self.reward_data
        # Run one optimizer iteration.
        _,loss,q_acted=self.sess.run([self.optim,self.loss,self.q_acted],feed_dict={self.targetValue:np.resize(targetValue,(pms.batchSize,))
            ,self.actionChoose:self.action_data
            ,self.learning_rate_step: self.trainNumber
            ,self.s_t:self.s_t_data})
        # Periodically persist the model.
        if(self.trainNumber%pms.saveModelStep==pms.saveModelStep-1):
            self.step_assign_op.eval({self.step_input: self.step + 1})
            self.saveModel(self.trainNumber+1)
        # Log the loss summary (skip the noisy first 100 iterations).
        if(self.trainNumber>100):
            self.writer.add_summary(self.sess.run(self.lossSummaryOp,{self.lossInput:loss}),self.trainNumber)
    def getStartStep(self):
        """Return the persisted global step (0 on a fresh run)."""
        return self.step_op.eval()
    def updateTarget(self):
        """Copy online-network weights/biases into the target network.

        For each layer name in the online net, looks up 'weights'/'biases'
        in scope <name> and assigns them to scope <name>_t; layers without
        trainable variables (no weights/biases) are skipped.
        """
        for op_name in self.trainNet.layers.keys():
            with tf.variable_scope(op_name, reuse=True):
                try:
                    var_w = tf.get_variable('weights')
                    var_b = tf.get_variable('biases')
                except ValueError:
                    # Layer has no weights/biases (e.g. an activation) — skip.
                    continue
            with tf.variable_scope(op_name + "_t", reuse=True):
                try:
                    var_w_t = tf.get_variable('weights')
                    var_b_t = tf.get_variable('biases')
                except ValueError:
                    # No matching target-scope variables — skip.
                    continue
            self.sess.run(var_w_t.assign(var_w))
            self.sess.run(var_b_t.assign(var_b))
    def saveModel(self,step):
        """Save a checkpoint at ``pms.saveModelPath`` tagged with ``step``."""
        print(" [*] Saving checkpoints...")
        if not os.path.exists(pms.saveModelPath):
            os.makedirs(pms.saveModelPath)
        self.saver.save(self.sess,pms.saveModelPath,step)
    def load_model(self,modelName=None):
        """Restore a checkpoint; return True on success, False otherwise.

        Args:
            modelName: explicit checkpoint file name; ``None`` picks the
                latest checkpoint recorded in the checkpoint state file.
        """
        print(" [*] Loading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(pms.saveModelPath)
        if ckpt and ckpt.model_checkpoint_path:
            if(modelName==None):
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            else:
                ckpt_name=modelName
            fname = os.path.join(pms.saveModelPath, ckpt_name)
            self.saver.restore(self.sess, fname)
            print(" [*] Load SUCCESS: %s" % fname)
            return True
        else:
            print(" [!] Load FAILED: %s" % pms.saveModelPath)
            return False
