#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
ActorNetwork.py    defines the Actor network
CriticNetwork.py   defines the Critic network
OU.py              Ornstein-Uhlenbeck noise process
ReplayBuffer.py    experience replay buffer
actormodel.json    saved Actor network architecture
actormodel.h5      saved Actor network weights
criticmodel.json   saved Critic network architecture
criticmodel.h5     saved Critic network weights
ddpg.py            main script
gym_torcs.py       Python interface to TORCS
snakeoil3_gym.py   Python client that communicates with the TORCS server
'''

from gym_torcs import TorcsEnv
import numpy as np
import random
import argparse
from keras.models import model_from_json, Model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
import tensorflow as tf
import json

from ReplayBuffer import ReplayBuffer
from ActorNetwork import ActorNetwork
from CriticNetwork import CriticNetwork
from OU import OU
import timeit

# Module-level Ornstein-Uhlenbeck noise generator used for action exploration.
# NOTE(review): this rebinding shadows the imported OU class; the class itself
# is unreachable after this line. Kept as-is because playGame() refers to it.
OU = OU()       # Ornstein-Uhlenbeck Process

def playGame(train_indicator=1):    # 1 means Train, 0 means simply Run
    """Run the DDPG training/evaluation loop against the TORCS environment.

    Loads previously saved actor/critic weights if present, then runs up to
    ``episode_count`` episodes, optionally training the networks and saving
    weights/architecture every third episode.

    Args:
        train_indicator: 1 to train the networks (with exploration noise and
            periodic weight saving); 0 to only run the current policy.
    """
    BUFFER_SIZE = 100000     # replay-buffer capacity
    BATCH_SIZE = 64          # minibatch size per gradient update
    GAMMA = 0.99             # discount factor
    TAU = 0.001              # soft target-network update rate
    LRA = 0.0001             # learning rate for the Actor
    LRC = 0.001              # learning rate for the Critic
    action_dim = 3           # steering, acceleration, brake
    state_dim = 29           # 29 sensor inputs
    Reward_average_each_episode = []
    np.random.seed(1337)     # fixed seed so exploration noise is reproducible
    vision = False
    EXPLORE = 100000.        # number of steps over which noise is annealed
    episode_count = 2000
    max_steps = 5000  # 100000
    done = False
    step = 0
    epsilon = 1
    # Limit TensorFlow GPU usage: grow the allocation on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # Use Keras as a thin interface on top of the TensorFlow session.
    from keras import backend as K
    K.set_session(sess)
    # Build actor/critic (each with its own target network) and the replay buffer.
    actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)
    critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
    buff = ReplayBuffer(BUFFER_SIZE)    # Create replay buffer
    # Generate a Torcs environment
    env = TorcsEnv(vision=vision, throttle=True, gear_change=False)
    # Try to resume from previously saved weights; start fresh if unavailable.
    print("Now we load the weight")
    try:
        actor.model.load_weights("actormodel.h5")
        critic.model.load_weights("criticmodel.h5")
        actor.target_model.load_weights("actormodel.h5")
        critic.target_model.load_weights("criticmodel.h5")
        print("Weight load successfully")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any load failure still falls back to
        # freshly initialized weights (deliberate best-effort).
        print("Cannot find the weight")

    print("TORCS Experiment Start.")
    for i in range(episode_count):
        print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
        if np.mod(i, 3) == 0:
            ob = env.reset(relaunch=True)   #relaunch TORCS every 3 episode because of the memory leak error
        else:
            ob = env.reset()
        # Assemble the 29-dim state vector from the raw observation.
        s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY,  ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
        total_reward = 0.
        # Steps within one episode.
        for j in range(max_steps):
            loss = 0
            epsilon -= 1.0 / EXPLORE
            a_t = np.zeros([1, action_dim])
            noise_t = np.zeros([1, action_dim])
            # Actor network picks the action for the current state.
            a_t_original = actor.model.predict(s_t.reshape(1, s_t.shape[0]))
            # Ornstein-Uhlenbeck exploration noise per action component.
            noise_t[0][0] = OU.function(a_t_original[0][0],  0.0 , 0.60, 0.30)
            noise_t[0][1] = OU.function(a_t_original[0][1],  0.5 , 1.00, 0.10)
            noise_t[0][2] = OU.function(a_t_original[0][2], -0.1 , 1.00, 0.05)
            # Noise is scaled by the annealed exploration rate and is zero
            # when not training (train_indicator == 0).
            alpha = train_indicator * max(epsilon, 0)

            a_t[0][0] = a_t_original[0][0] + alpha * noise_t[0][0]
            a_t[0][1] = a_t_original[0][1] + alpha * noise_t[0][1]
            a_t[0][2] = a_t_original[0][2] + alpha * noise_t[0][2]

            # Apply the action and observe reward / next state.
            ob, r_t, done, info = env.step(a_t[0])
            s_t1 = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
            # Store the transition (s, a, r, s', done), then sample a minibatch.
            buff.add(s_t, a_t[0], r_t, s_t1, done)  # Add replay buffer
            # Do the batch update
            batch = buff.getBatch(BATCH_SIZE)
            states = np.asarray([e[0] for e in batch])
            actions = np.asarray([e[1] for e in batch])
            rewards = np.asarray([e[2] for e in batch])
            new_states = np.asarray([e[3] for e in batch])
            dones = np.asarray([e[4] for e in batch])
            # TD targets start as the raw rewards (was rebuilt from the batch;
            # a copy of `rewards` is equivalent and cheaper).
            y_t = rewards.copy()
            # Target critic evaluates Q(s', mu'(s')) for the TD targets.
            target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])
            # y_i = r_i + gamma * Q'(s'_i, mu'(s'_i)) for non-terminal transitions.
            for k in range(len(batch)):
                if not dones[k]:
                    # float() collapses the (1,)-shaped prediction to a scalar;
                    # implicit array-to-scalar assignment is deprecated in NumPy.
                    y_t[k] = rewards[k] + GAMMA * float(target_q_values[k])
            # Gradient updates: critic on TD error, actor on the deterministic
            # policy gradient, then soft updates of both target networks.
            if (train_indicator):
                loss += critic.model.train_on_batch([states, actions], y_t)
                a_for_grad = actor.model.predict(states)
                grads = critic.gradients(states, a_for_grad)
                actor.train(states, grads)
                actor.target_train()
                critic.target_train()

            total_reward += r_t
            s_t = s_t1

            if np.mod(j, 100) == 0:
                print("Episode", i, "Step", step, "Action", a_t, "Reward", r_t, "Loss", loss)

            step += 1
            if done:
                # max(j, 1) guards against the episode ending on the very
                # first step (j == 0), which previously raised ZeroDivisionError.
                Reward_average_each_episode.append(total_reward / max(j, 1))
                print("Reward_average_each_episode   ", Reward_average_each_episode)
                break

        if np.mod(i, 3) == 0:  # persist the model every third episode
            if (train_indicator):
                print("Now we save model")
                # Only weights go to .h5 (architecture saved separately as JSON);
                # the identical network structure must be rebuilt before the
                # weights can be reloaded.
                actor.model.save_weights("actormodel.h5", overwrite=True)
                with open("actormodel.json", "w") as outfile:
                    json.dump(actor.model.to_json(), outfile)
                critic.model.save_weights("criticmodel.h5", overwrite=True)
                with open("criticmodel.json", "w") as outfile:
                    json.dump(critic.model.to_json(), outfile)

        print("TOTAL REWARD @ " + str(i) +"-th Episode  : Reward " + str(total_reward))
        print("Total Step: " + str(step))
        print("")

    env.end()  # This is for shutting down TORCS
    print("Finish.")

if __name__ == "__main__":
    # Script entry point: runs in training mode (train_indicator defaults to 1).
    playGame()
