#! /usr/bin/env python
"""
Author: Richard W. Van Tassel
Program: acrobot.py
Date: Saturday, February 25 2012
Description: An acrobot implementation based on Adam White's and Brian Tanner's code at http://library.rl-community.org/wiki/Acrobot_%28Java%29
             and kinematics function from Remi Munos's code at http://researchers.lille.inria.fr/~munos/variable/acrobot.html
"""
import math,random

class Acrobot(object):
    """Simulation of the classic two-link acrobot swing-up task.

    The state is the 4-tuple (theta1, theta2, theta1_dot, theta2_dot).
    Dynamics follow Adam White's / Brian Tanner's RL-community Java
    implementation; the drawing kinematics are adapted from Remi Munos.
    """

    # Bounds on joint angles and angular velocities (radians, rad/s).
    MAXTHETA1 = math.pi
    MAXTHETA2 = math.pi
    MAXTHETA1DOT = 4.0 * math.pi
    MAXTHETA2DOT = 9.0 * math.pi
    # Link masses, lengths, centre-of-mass offsets and moments of inertia.
    M1 = 40.0
    M2 = 40.0
    L1 = 40.0
    L2 = 40.0
    LC1 = 20.0
    LC2 = 20.0
    I1 = M1 * L1 ** 2.0
    I2 = M2 * L2 ** 2.0
    G = 9.8       # gravitational acceleration
    DT = 0.05     # Euler integration sub-step
    NOISE = 0     # actuator noise amplitude (disabled)
    # Goal height for the alternative, height-based terminal test
    # (currently unused by isTerminal).
    ACROBATGOALPOSITION = -75

    @classmethod
    def arm_kinematics(cls, p1, p2):
        """Return (j1_x, j1_y, j2_x, j2_y): elbow and hand coordinates.

        A stick-figure drawing of the arm is just two segments: one from
        (0, 0) to (j1_x, j1_y) and one from (j1_x, j1_y) to (j2_x, j2_y).
        """
        # Shift by 1.5*pi so that theta1 = 0 corresponds to hanging down.
        shoulder = p1 + math.pi * 1.5
        elbow = shoulder + p2
        j1_x = cls.L1 * math.cos(shoulder)
        j1_y = cls.L1 * math.sin(shoulder)
        j2_x = j1_x + cls.L2 * math.cos(elbow)
        j2_y = j1_y + cls.L2 * math.sin(elbow)
        return j1_x, j1_y, j2_x, j2_y

    def __init__(self, theta1=0.0, theta2=0.0, theta1_dot=0.0, theta2_dot=0.0):
        # state: (theta1, theta2, theta1_dot, theta2_dot)
        self.state = (theta1, theta2, theta1_dot, theta2_dot)
        # step: number of environment steps taken so far
        self.step = 0

    def reset(self):
        """Return the arm to rest (hanging straight down) and zero the step count."""
        self.state = (0.0, 0.0, 0.0, 0.0)
        self.step = 0

    def random_policy(self, theta1=1.5 * math.pi, theta2=0.0, theta1_dot=0.0, theta2_dot=0.0):
        """Ignore the state and pick one of the three actions uniformly."""
        return random.choice((0, 1, 2))

    def single_episode(self, policy=None):
        """Run one episode (at most 1000 steps total) and return its trace.

        Each trace entry is the list [prev_state, action, reward,
        next_state, next_action].  NOTE(review): self.step is NOT reset
        here, so a second call continues from the accumulated step count —
        call reset() first if independent episodes are wanted.
        """
        policy = self.random_policy if policy is None else policy

        trace = []
        next_action = policy(*self.state)
        while self.step < 1000:
            self.step += 1
            prev_state, action, reward, new_state = self.move(next_action)
            next_action = policy(*new_state)
            trace.append([prev_state, action, reward, new_state, next_action])
            if self.isTerminal():
                break
        return trace

    def isTerminal(self):
        """True when the first joint angle is within 0.5 rad of zero.

        NOTE(review): the reset state (0, 0, 0, 0) already satisfies this,
        so episodes started from rest end after a single step — presumably
        intentional for the experiments this was written for; an
        alternative height-based test against ACROBATGOALPOSITION exists
        but is unused.
        """
        return abs(self.state[0]) < 0.5

    def move(self, next_action):
        """Advance the simulation one environment step.

        next_action in {0, 1, 2} maps to a torque of {-1, 0, +1} (then
        scaled by 1000).  Returns (prev_state, action, reward, new_state);
        the reward is always 0 here.
        """
        previous = tuple(self.state)
        t1, t2, t1_dot, t2_dot = previous

        torque = next_action - 1.0
        # Optional actuator noise; NOISE is 0, so this only advances the RNG.
        torque += self.NOISE * 2.0 * (random.random() - .5)
        torque *= 1000

        # Hoist the physical constants out of the integration loop.
        m1, m2, l1, lc1, lc2 = self.M1, self.M2, self.L1, self.LC1, self.LC2
        i1, i2, g, dt = self.I1, self.I2, self.G, self.DT

        # Four semi-implicit Euler sub-steps of the acrobot dynamics.
        for _ in range(4):
            d1 = m1 * (lc1 ** 2.0) + m2 * ((l1 ** 2.0) + 2.0 * l1 * lc2 * math.cos(t2)) + i1 + i2
            d2 = m2 * ((lc2 ** 2.0) + l1 * lc2 * math.cos(t2)) + i2

            phi_2 = m2 * lc2 * g * math.cos(t1 + t2 - math.pi / 2.0)
            phi_1 = (
                -(m2 * l1 * lc2 * (t2_dot ** 2.0) * math.sin(t2))
                - 2.0 * m2 * l1 * lc2 * t1_dot * t2_dot * math.sin(t2)
                + (m1 * lc1 + m2 * l1) * g * math.cos(t1 - math.pi / 2.0) + phi_2)

            theta2_ddot = ((torque + (d2 / d1) * phi_1 - m2 * l1 * lc2 * (t1_dot ** 2.0) * math.sin(t2) - phi_2) /
                           (m2 * (lc2 ** 2.0) + i2 - (d2 ** 2.0) / d1))
            theta1_ddot = -(d2 * theta2_ddot + phi_1) / d1

            # Velocities first, then positions from the *updated* velocities.
            t1_dot += theta1_ddot * dt
            t2_dot += theta2_ddot * dt
            t1 += t1_dot * dt
            t2 += t2_dot * dt

        # Saturate velocities and angles at their configured bounds.
        t1_dot = self._clip(t1_dot, self.MAXTHETA1DOT)
        t2_dot = self._clip(t2_dot, self.MAXTHETA2DOT)
        t2 = self._clip(t2, math.pi)
        t1 = self._clip(t1, math.pi)

        self.state = (t1, t2, t1_dot, t2_dot)
        return (previous, next_action, 0, self.state)

    @staticmethod
    def _clip(value, bound):
        # Saturate |value| at bound, preserving the sign of value.
        if abs(value) > bound:
            return math.copysign(bound, value)
        return value

    @classmethod
    def nextState(cls, state, action):
        """Stateless helper: the state reached by taking action from state."""
        return cls(*state).move(action)[3]
    
    