# -*- coding: utf-8 -*-
import LIP
import sys
import matplotlib.pyplot as plt
import numpy as np
import load_trajectory as ld
import signal
sys.path.append("./pyDmps")
sys.path.append(".")
import dmp_discrete
import net
# Force a re-import of the local modules so that source edits are picked
# up without restarting the interpreter (Python 2 builtin `reload`).
reload(net)
reload(LIP)

def cb(s, f):
    """Signal handler: close the policy network's TF session and exit.

    Registered for SIGTERM/SIGINT in the __main__ block so Ctrl-C or a
    kill releases the session cleanly instead of dying mid-step.

    :param s: signal number delivered by the OS.
    :param f: current stack frame (unused; required by the signal API).
    """
    # Print via a single formatted string so this parses under both
    # Python 2 and Python 3 with identical output (the original used a
    # Python-2-only `print x, y` statement).
    print('recv signal %s' % s)
    RL.sess.close()  # NOTE(review): presumably a TensorFlow session — confirm in net.py
    sys.exit(0)

# Discrete action table for the policy network: maps each of the 9
# action indices to an [fx, fy] external force applied to the DMP
# swing-foot trajectory.  (An earlier +/-10 variant of this table was
# tried and abandoned.)
_FORCE_PAIRS = [
    (-5.0, -5.0),
    (5.0, 5.0),
    (5.0, -5.0),
    (5.0, 0.0),
    (0, 0),
    (0, 5.0),
    (0, -5.0),
    (-5.0, 0),
    (-5.0, 5.0),
]
action_set = dict((idx, list(pair)) for idx, pair in enumerate(_FORCE_PAIRS))
def fun(lip):
    """Per-simulation-step callback invoked by lip.run().

    Picks a discrete external-force action from the policy network,
    advances the swing-foot DMP by one step, applies the foot position
    to the LIP model, and records the transition for learning.  Relies
    on module-level state: RL, dmp, tau_value, action_set, observation,
    and the counters i / i_episode.
    """
    global i, external_force, RL, observation, i_episode
    # State S: x dx ddx y[0] y[1]
    # Action A: dmp.goal[0](-0.7 0.7) tau(0.02,10.0) external_force[0] external_force[1]
    # Reward R: down -10 not down 1
    # NOTE(review): the header above describes an older continuous action
    # space; the code below actually uses the 9-entry discrete action_set.

    action = RL.choose_action(observation)
    # if i%20 ==0:
        # print('act:', action)
    external_force = np.array(action_set[action])
    # Advance the DMP one step under the chosen perturbation force.
    y, dy, ddy = dmp.step(tau=tau_value, external_force=external_force)
    lip.set_swing_foot_pos(y[0], y[1])
    # Step the pendulum and collect the transition (obs, reward, done).
    observation_, reward, done, info = lip.update()
    # Touch-down: the swing foot reached the ground (height y[1] <= 0).
    # Swap the support leg and restart the DMP from the new stance foot.
    if y[1] <= 0.0:
        if lip.stand_leg == 'left_leg':
            lip.switch_support_leg('right_leg')
            dmp.y0[0] = lip.left_foot_x
        else:
            lip.switch_support_leg('left_leg')
            dmp.y0[0] = lip.right_foot_x
        lip.update_orbital_energy()
        # Shaped reward: peaks (at 200) when orbital energy E is near 0.02.
        reward = 1.0/((lip.E - 0.02)**2+0.005)
        print(reward)
        dmp.goal[0] = 0.2 # set the goal_x
        dmp.reset_state()
    i = i + 1

    # Store the transition BEFORE advancing the observation.
    RL.store_transition(observation, action, reward)
    observation = observation_
    if done:
        # Episode over (robot fell): reset pendulum and DMP, then learn.
        ep_rs_sum = sum(RL.ep_rs)
        lip.reset(0.1, 0.1, 0.7, 0.001, 'left_leg')
        dmp.goal[0] = 0.2 # set the goal_x
        dmp.y0[0] = 0.0 # set the initial x
        dmp.reset_state()
        running_reward = ep_rs_sum
        i_episode = i_episode + 1
        print("episode:", i_episode, "rewards:", (running_reward))
        # Learn once per episode.
        vt = RL.learn()
        # The agent takes one exploration step.
# State S: x dx ddx y[0] y[1] dy[0] dy[1]
# Action A: dmp.goal[0](-0.7 0.7) tau(0.02,10.0) external_force[0] external_force[1]
# Reward R: down -10 not down 1
if __name__ == "__main__":
    # Global step counter and the force currently applied to the DMP.
    i = 0
    external_force = np.array([0, 0])
    # Shut down cleanly (close the TF session) on kill or Ctrl-C.
    signal.signal(signal.SIGTERM, cb)
    signal.signal(signal.SIGINT, cb)
    # Load the reference swing-foot trajectory (columns: x, y) and plot
    # it for visual comparison against the learned motion.
    trajectory = ld.loadTrajectory("swing_trajectory.txt")
    trajectory_x = trajectory[:, 0]
    trajectory_y = trajectory[:, 1]
    plt.plot(trajectory_x, trajectory_y, 'r--', lw=2)
    # Fit a 2-DOF discrete DMP (swing-foot x and y) to the trajectory,
    # then aim its x-goal at the nominal step length 0.2.
    dmp = dmp_discrete.DMPs_discrete(dmps=2, bfs=100)
    dmp.imitate_path(y_des=np.array([trajectory_x, trajectory_y]))
    dmp.goal[0] = 0.2
    dmp.y0[0] = 0.0
    dmp.reset_state()
    # Time scaling: presumably chosen so one DMP rollout spans about
    # data_point_num control steps — TODO confirm against dmp_discrete.
    data_point_num = 300
    tau_value = (1 / dmp.dt)/ data_point_num
    # Policy-gradient agent: 9 discrete force actions, 5-D observation.
    RL = net.PolicyGradient(
        n_actions=9,
        n_features=5,
        learning_rate=0.02,
        reward_decay=0.99,
    )
    observation = np.array([0, 0, 0, 0, 0])
    i_episode = 0
    # LIP model; `fun` is invoked on every simulation step by lip.run().
    lip = LIP.LIP(0.1, 0.1, 0.7, 0.001, 'left_leg')
    lip.inster_function = fun
    lip.run()

