

import random
import gym
import tensorflow as tf
import numpy as np

class Game(object):
    """Thin wrapper around a gym environment.

    Tracks the most recent observation and the terminal flag so the
    training loop can poll state between steps.
    """

    def __init__(self, game_name="CartPole-v0"):
        # NOTE(review): assumes a discrete-action env (action_space.n is
        # read in getActionNum) whose observation is a flat vector.
        self.env = gym.make(game_name)
        self.done = False
        self.observation = self.reset()

    def reset(self):
        """Restart the episode and return the initial observation.

        Bug fix: also clear the stale terminal flag, so isDone() does not
        keep reporting True between a reset and the first step of the new
        episode.
        """
        self.done = False
        return self.env.reset()

    def step(self, action):
        """Advance one step; return (reward, observation).

        A terminal step is punished with reward -100 so the Q-learning
        target strongly discourages ending the episode.
        """
        self.observation, reward, self.done, info = self.env.step(action)
        if self.isDone():
            reward = -100
        return reward, self.observation

    def isDone(self):
        # True once the current episode has terminated.
        return self.done

    def getObservation(self):
        # Most recent observation returned by the environment.
        return self.observation

    def getAction(self, index=0, isRandom=False):
        """Return a random sampled action when isRandom, else the given index."""
        if isRandom:
            return self.env.action_space.sample()
        return index

    def getActionNum(self):
        # Size of the discrete action space.
        return self.env.action_space.n

# Module-level environment instance and TF1 placeholders.
game = Game()
# State placeholder: [batch, 4] — presumably the CartPole observation
# (cart pos/vel, pole angle/vel); TODO confirm for other game_name values.
input_data = tf.placeholder("float",[None,4])
# NOTE(review): this placeholder appears unused — train_neural_network
# builds its own one-hot action placeholder internally.
action = tf.placeholder("float",[None,game.getActionNum()])

def network(input_):
    """Build a 3-layer MLP mapping a batch of states to per-action Q-values.

    input_: float placeholder of shape [None, 4] (the state batch).
    Returns a [None, 2] tensor of Q-values, one column per action.

    Fixes over the original:
      * biases were added to the weight matrices *inside* tf.matmul
        (tf.matmul(x, W + b)); each layer now computes relu(x @ W + b).
      * all weights were zero-initialized, which makes every ReLU output
        (and every gradient through it) zero, so the network could never
        learn; weights now use a small random init, biases stay zero.
      * the output layer had a ReLU, clamping Q-values to be non-negative
        even though terminal transitions carry reward -100; the output is
        now linear, as Q-value regression requires.
    """
    weights = {"fc1": tf.get_variable("fc1_weights", [4, 16],
                                      initializer=tf.truncated_normal_initializer(stddev=0.01)),
               "fc2": tf.get_variable("fc2_weights", [16, 16],
                                      initializer=tf.truncated_normal_initializer(stddev=0.01)),
               "fc3": tf.get_variable("fc3_weights", [16, 2],
                                      initializer=tf.truncated_normal_initializer(stddev=0.01))}
    biases = {"fc1": tf.get_variable("fc1_biases", [16], initializer=tf.constant_initializer(0)),
              "fc2": tf.get_variable("fc2_biases", [16], initializer=tf.constant_initializer(0)),
              "fc3": tf.get_variable("fc3_biases", [2], initializer=tf.constant_initializer(0))}
    fc1 = tf.nn.relu(tf.matmul(input_, weights["fc1"]) + biases["fc1"])
    fc2 = tf.nn.relu(tf.matmul(fc1, weights["fc2"]) + biases["fc2"])
    # Linear output: Q-values may legitimately be negative.
    fc3 = tf.matmul(fc2, weights["fc3"]) + biases["fc3"]
    return fc3


# DQN hyper-parameters.
INITIAL_EPSILON = 1.0  # starting exploration rate for epsilon-greedy
FINAL_EPSILON = 0.05  # floor the exploration rate never drops below
REPLAY_MEMORY = 50000  # maximum transitions kept in the replay buffer
OBSERVE = 5000  # steps collected before any training updates start
EXPLORE = 50000  # steps over which epsilon is annealed to its floor
DISCOUNT_FACTOR = 0.99  # gamma in the Q-learning target
BATCH = 64  # minibatch size sampled from the replay buffer
def train_neural_network(input_):
    """Train the Q-network with epsilon-greedy exploration and experience replay.

    input_: the [None, 4] state placeholder the network is built on.
    Runs indefinitely (up to a hard step cap), periodically checkpointing
    and printing progress.

    Fixes over the original:
      * the per-example Q(s, a) was computed with tf.reduce_sum over ALL
        dimensions, collapsing the whole batch to a scalar before being
        subtracted from the [BATCH] target vector; the sum is now per row.
      * the TD target bootstrapped from Q-values of the *current* states;
        Q-learning requires max_a' Q(s', a') of the *next* states.
      * terminal transitions now use the bare reward as the target instead
        of bootstrapping past the end of the episode.
      * np.int (removed from modern NumPy) replaced by the builtin int.
      * the checkpoint directory is created before saving.
      * the Python-2 print statement replaced by a py2/py3-compatible call.
    """
    import os  # local import: only needed for checkpoint-directory creation

    predict_Q = network(input_)
    argmax = tf.placeholder("float", [None, game.getActionNum()])
    gt = tf.placeholder("float", [None])

    # Q(s, a) for the action actually taken (argmax is one-hot per row),
    # reduced per example, not over the whole batch.
    q_taken = tf.reduce_sum(predict_Q * argmax, axis=1)
    cost = tf.reduce_mean(tf.square(q_taken - gt))
    optimizer = tf.train.AdamOptimizer(1e-6).minimize(cost)

    state = game.reset()  # restart a new game

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        n = 0
        epsilon = INITIAL_EPSILON
        D = []  # replay buffer of (s, one_hot_action, r, s', terminal)
        while True:
            # Epsilon-greedy action selection on the current state.
            action_values = predict_Q.eval(feed_dict={input_: [state]})[0]
            argmax_t = np.zeros([game.getActionNum()], dtype=int)
            if random.random() <= epsilon:  # balance explore vs. exploit
                maxIndex = random.randrange(game.getActionNum())
            else:
                maxIndex = np.argmax(action_values)
            argmax_t[maxIndex] = 1
            if epsilon > FINAL_EPSILON:
                epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

            # Step the environment and store the transition for replay.
            reward, next_state = game.step(game.getAction(maxIndex))
            D.append((state, argmax_t, reward, next_state, game.isDone()))
            if len(D) > REPLAY_MEMORY:
                D.pop(0)

            loss = 0
            if n > OBSERVE:
                minibatch = random.sample(D, BATCH)
                state_batch = [d[0] for d in minibatch]
                argmax_batch = [d[1] for d in minibatch]
                reward_batch = [d[2] for d in minibatch]
                next_state_batch = [d[3] for d in minibatch]
                terminal_batch = [d[4] for d in minibatch]

                # TD target: r for terminal transitions, otherwise
                # r + gamma * max_a' Q(s', a') evaluated on the NEXT states.
                next_Q_batch = predict_Q.eval(feed_dict={input_: next_state_batch})
                gt_batch = []
                for i in range(len(minibatch)):
                    if terminal_batch[i]:
                        gt_batch.append(reward_batch[i])
                    else:
                        gt_batch.append(reward_batch[i] +
                                        DISCOUNT_FACTOR * np.max(next_Q_batch[i]))
                loss, _ = sess.run([cost, optimizer],
                                   feed_dict={gt: gt_batch,
                                              argmax: argmax_batch,
                                              input_: state_batch})

            state = next_state
            n = n + 1
            if n % 10000 == 0:
                # saver.save fails if the target directory does not exist.
                if not os.path.isdir("saver"):
                    os.makedirs("saver")
                saver.save(sess, "saver/game.cpk", global_step=n)
            if n % 1000 == 0:
                print("%d loss: %s epsilon: %s  action: %s  reward: %s"
                      % (n, loss, epsilon, maxIndex, reward))
            if game.isDone():
                state = game.reset()  # episode over: restart
            if n > 50000000:  # hard cap on total steps
                break

# Entry point: train using the module-level state placeholder.
if __name__ == "__main__": 
    train_neural_network(input_data)
