# -*- coding: utf-8 -*-
import LIP
import sys
import matplotlib.pyplot as plt
import numpy as np
import load_trajectory as ld
import tensorflow as tf
import cartpole_pg
reload(cartpole_pg)
import signal
sys.path.append("./pyDmps")
sys.path.append(".")
import dmp_discrete
import net
reload(net)
reload(LIP)
# Terminal reward (reward3) of every completed training episode, kept for
# offline inspection (appended in fun(), never read here).
record = []
def discount_rewards(reward, gamma):
    """Return the discounted cumulative reward-to-go for one episode.

    discounted[t] = sum_{k >= t} gamma**(k - t) * reward[k]

    Parameters
    ----------
    reward : sequence of float
        Per-step rewards of a single episode.
    gamma : float
        Discount factor.

    Returns
    -------
    numpy.ndarray (float64), same length as `reward`.
    """
    # Use an explicit float buffer: np.zeros_like() on an int list/array
    # would create an int buffer and silently truncate the fractional
    # discounted values on assignment.
    discounted_ep_rs = np.zeros(len(reward), dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(reward))):
        running_add = running_add * gamma + reward[t]
        discounted_ep_rs[t] = running_add
    return discounted_ep_rs

def norm_rewards(reward):
    """Standardize rewards to approximately zero mean / unit variance.

    Parameters
    ----------
    reward : sequence of float
        Batch of rewards (list or ndarray).

    Returns
    -------
    numpy.ndarray (float64) of the normalized rewards.
    """
    # Work on a float64 copy: the original in-place `-=` / `/=` would
    # truncate (or raise, on modern numpy) for integer arrays, and it
    # mutated the caller's array as a side effect.
    reward = np.array(reward, dtype=np.float64)
    reward -= np.mean(reward)
    # Epsilon avoids division by zero when all rewards are identical.
    reward /= (np.std(reward) + 0.0001)
    return reward


# Convergence idea: if the system can be driven from any state to a target
# state, and the target state can return to itself, then stability follows.
# First know the agent's capability -- the best level it can possibly reach;
# with limited capability no amount of control can produce a good result.
# "Competence" has two parts: knowing what the agent can do (its limits) and
# controlling it; together these determine the final outcome.
# Any game playable with a gamepad will eventually be automated, because its
# action space is small and its control is tractable.

def cb(s, f):
    """Signal handler: report the received signal number and exit cleanly.

    Registered for SIGTERM and SIGINT in __main__.

    Parameters
    ----------
    s : int
        Signal number delivered by the OS.
    f : frame or None
        Interrupted stack frame (unused).
    """
    # Built with string concatenation so the output is identical under both
    # Python 2 and Python 3 (the original `print 'recv signal', s` was a
    # Python-2-only statement).
    print('recv signal ' + str(s))
    sys.exit(0)

# action_set = {0:[-10.0,-10],
              # 1:[10,10],
              # 2:[10,-10],
              # 3:[10,0],
              # 4:[0,0],
              # 5:[0,10],
              # 6:[0,-10],
              # 7:[-10,0],
              # 8:[-10,10]}
# Discrete action set: each action index maps to a 2-D external force
# [fx, fy] applied to the swing-foot DMP in fun().
action_set = {0:[0.0,0.0],
              1:[4.0, 0.0],
              2:[-4.0, 0.0],
              3:[-10.0, -0.0],
              4:[10.0, 0.0]}
# Alternative 1-D action table; not referenced in the visible code.
action_set1 = {0:[0.3],
              1:[0.4],
              2:[0.5],
              3:[0.6],
              4:[0.7]}
# Per-episode buffers, flushed into the batch buffers at episode end.
obs, actions, rewards = [], [], []
# Accumulated over episodes until a training step (every 10 episodes).
batch_obs, batch_actions, batch_rewards = [], [], []
batch = 0  # number of episodes collected in the current batch
eval_param = 0  # mode flag: 2 = evaluation rollout, 1/0 = training (see fun())
# Initial CoM position / velocity used when resetting the LIP model.
random_x = 0.1
random_dx = 0.1
def reward_add(re, offset):
    """Shift every element of `re` by `offset`, in place (returns None)."""
    for idx, value in enumerate(re):
        re[idx] = value + offset
update_time = 1  # completed training batches; parity alternates the reset state
org = 0  # debug flag, only referenced from commented-out code in fun()
def fun(lip):
    """Run one simulation tick of the policy-gradient walking controller.

    Queries the agent for a discrete action, applies it as an external
    force on the swing-foot DMP, steps the LIP plant, and appends
    (observation, action, reward) to the episode buffers.  When the swing
    foot touches down (y[1] <= 0.0) or the plant reports failure
    (done != 0) the episode is closed: a terminal reward derived from the
    final CoM position and orbital energy is added to every step's reward,
    and every 10 closed episodes one training step is run.

    Operates almost entirely on module-level globals; `lip` is the LIP
    plant instance.  `eval_param == 2` selects an evaluation rollout in
    which no training data is accumulated.
    """
    global i, external_force, observation, i_episode, agent
    global obs, actions, rewards, batch
    global random_x, random_dx, update_time, org
    global batch_obs, batch_actions, batch_rewards, eval_param, record
    #S: x dx ddx y[0] y[1]
    #A: dmp.goal[0](-0.7 0.7) tau(0.02,10.0) external_force[0] external_force[1]
    #R: down -10 not down 1
    action = agent.act(observation,eval_param)
    # if org == 1:
    # if i%20 ==0:
    # print(eval_param)
    # action = 3
    # Map the discrete action index to a 2-D force on the swing-foot DMP.
    external_force = np.array(action_set[action])
    y, dy, ddy = dmp.step(tau=tau_value, external_force=external_force)
    lip.set_swing_foot_pos(y[0], y[1])
    # Store the observation, the action and the reward.
    # import ipdb; ipdb.set_trace()  # XXX BREAKPOINT
    observation, reward, done, info = lip.update()
    if eval_param == 2:
        # Evaluation rollout: reset on failure, switch legs on touchdown,
        # but do not close episodes or accumulate training data.
        if done != 0:
            dmp.y0[0] = -0.15
            dmp.goal[0] = 0.2
            dmp.reset_state()
            lip.reset(random_x, random_dx, 0.7, 0.001, 'left_leg')
            eval_param = 0
        if y[1] <= 0.0:
            # Touchdown: swap support/swing legs and restart the DMP from
            # the new swing foot x-position.
            if lip.stand_leg == 'left_leg':
                lip.switch_support_leg('right_leg')
                dmp.y0[0] = lip.left_foot_x
            else:
                lip.switch_support_leg('left_leg')
                dmp.y0[0] = lip.right_foot_x
            dmp.goal[0] = 0.2
            dmp.reset_state()
            # Step once more so the orbital energy E reflects the new
            # support configuration.
            observation, reward, done, info = lip.update()
            lip.update_orbital_energy()
            # print('eval state',lip.x,lip.dx,lip.E)
            # Reward terms: log of inverse squared CoM offset and of the
            # orbital-energy error relative to 0.1.
            reward1 = np.log(1.0/lip.x**2+0.001)
            reward2 = np.log(1.0/((lip.E-0.1)**2+0.001))
            reward3 = reward1 + reward2
            if  done == 2 or done == 3:
                reward3 = 0
            # NOTE(review): in evaluation mode reward3 is only printed,
            # never recorded or used for training.
            print('reward1',reward1,'reward2',reward2)
    elif y[1] <= 0.0 or done != 0:
        # Training mode: the episode ends at touchdown or on failure.
        if lip.stand_leg == 'left_leg':
            lip.switch_support_leg('right_leg')
            dmp.y0[0] = lip.left_foot_x
        else:
            lip.switch_support_leg('left_leg')
            dmp.y0[0] = lip.right_foot_x
        # Step once more so a valid orbital energy E can be computed.
        observation, reward, done, info = lip.update()
        lip.update_orbital_energy()
        reward1 = np.log(1.0/(lip.x+0.05)**2+0.0001)
        reward2 = np.log(1.0/((lip.E-0.1)**2+0.0001))
        reward3 = reward1 + reward2
        if  done == 2 or done == 3:
            # Hard failure: drop the terminal bonus entirely.
            reward3 = 0
        # print('reward1',reward1,'reward2',reward2)
        dmp.y0[0] = -0.15
        dmp.goal[0] = 0.2
        dmp.reset_state()
        # print('random',random_x,random_dx)
        lip.reset(random_x, random_dx, 0.7, 0.001, 'left_leg')
        batch = batch + 1
        record.append(reward3)
        # Spread the terminal reward over every step of the episode, then
        # flush the episode buffers into the batch buffers.
        reward_add(rewards, reward3)
        batch_actions.extend(actions)
        batch_obs.extend(obs)
        batch_rewards.extend(rewards)
        obs, actions, rewards = [], [], []

    obs.append(observation)
    actions.append(action)
    rewards.append(reward)

    if batch == 10:
        # Ten episodes collected: run one policy-gradient training step.
        i_episode = i_episode + 1
        if eval_param == 1:
            running_reward = np.sum(batch_rewards)
            print("episode:", i_episode, "rewards:", running_reward)
        if eval_param == 1:
            eval_param = 0
        # rewards = discount_rewards(rewards, 0.95)
        batch_rewards = norm_rewards(batch_rewards)
        agent.train_step(batch_obs, batch_actions, batch_rewards)
        # Plot the normalized batch rewards for monitoring.
        fig = plt.figure(5)
        ax = fig.add_subplot(111)
        ax.plot(batch_rewards)
        fig.show()
        # sys.exit()
        batch_obs, batch_actions, batch_rewards = [], [], []
        batch = 0
        eval_param = 1
        i = 0
        update_time = update_time + 1
        # Alternate the reset state between batches to vary starting
        # conditions for the next round of episodes.
        if update_time%2 == 1:
            random_x = -0.1
            random_dx = 0.5
        else:
            random_x = -0.05
            random_dx = 0.5

if __name__ == "__main__":
    # NOTE(review): `global` at module scope is a no-op; the assignments
    # below already rebind the module-level names.
    global random_dx, random_x
    i = 0
    external_force = np.array([0, 0])
    # Exit cleanly on kill / Ctrl-C via cb().
    signal.signal(signal.SIGTERM, cb)
    signal.signal(signal.SIGINT, cb)
    # Learn a 2-DOF swing-foot DMP from a recorded demonstration trajectory.
    trajectory = ld.loadTrajectory("swing_trajectory.txt")
    trajectory_x = trajectory[:, 0]
    trajectory_y = trajectory[:, 1]
    # plt.plot(trajectory_x, trajectory_y, 'r--', lw=2)
    dmp = dmp_discrete.DMPs_discrete(dmps=2, bfs=100)
    dmp.imitate_path(y_des=np.array([trajectory_x, trajectory_y]))
    dmp.goal[0] = 0.2
    dmp.y0[0] = -0.15
    dmp.reset_state()
    # tau scales the DMP so one swing spans `data_point_num` steps.
    data_point_num = 250
    tau_value = (1 / dmp.dt)/ data_point_num
    # Policy network hyper-parameters: 5-D observation, 5 discrete actions.
    hparams = {
            'input_size': 5,
            'hidden_size': 10,
            'num_actions': 5,
            'learning_rate': 0.1
    }
    # environment params (not referenced in the visible code)
    eparams = {
            'num_batches': 40,
            'ep_per_batch': 10,
    }
    # NOTE(review): without a `with` block this call has no lasting
    # effect; the session below uses the default graph anyway.
    tf.Graph().as_default()
    sess = tf.Session()
    agent = cartpole_pg.PolicyGradientAgent(hparams, sess)
    sess.run(tf.global_variables_initializer())
    observation = np.array([0, 0, 0, 0, 0])
    i_episode = 0
    random_x = -0.1
    random_dx = 0.5
    #0.2 0.2 f
    # 0.1 0.2 b
    lip = LIP.LIP(random_x, random_dx, 0.7, 0.001, 'left_leg')
    # Start in evaluation mode; fun() flips the mode as batches complete.
    eval_param = 2
    for i in range(200000):
        fun(lip)
    # Hand control to the LIP loop: it calls fun() as its inner callback.
    random_x = -0.1
    random_dx = 0.5
    eval_param = 2
    lip.inster_function = fun
    lip.run()

