from turtle import position
import numpy as np
import math

from deepbots.supervisor.controllers.supervisor_emitter_receiver import SupervisorCSV
from torch import true_divide
from utilities import normalizeToRange, plotData


class KcgRobotSupervisor(SupervisorCSV):
    """
    Supervisor controller for the Kcg robot.

    Spawns the robot in the world, collects the observation vector the robot
    controller sends over the emitter/receiver channel, computes a reward
    based on the robot's height and its distance to a fixed target position,
    and decides when an episode terminates.
    """

    # Fixed goal position (x, y, z) the robot is rewarded for approaching.
    TARGET_POSITION = (2.1, 0.55, 0.045)

    def __init__(self):
        """
        Create the supervisor, spawn the robot via respawnRobot() and
        initialize episode bookkeeping.

        When in test mode (self.test = True) the agent stops being trained
        and picks actions in a non-stochastic way.
        """
        print("Robot is spawned in code, if you want to inspect it pause the simulation.")
        super().__init__()
        # Observation and action spaces are set as tuples, because that's
        # how the DDPG agent expects them.
        self.observationSpace = (14,)  # 8 motor values + 3 GPS + 3 IMU
        self.actionSpace = (6,)  # 4 move (vl, vr, wl, wr) + 2 rotation (vf, vb)
        self.robot = None
        self.respawnRobot()

        self.messageReceived = None  # Latest message received from the robot

        # Last known robot position, updated each step in get_observations().
        # Initialized here so get_reward()/is_done() are safe to call before
        # the first observation arrives (previously an AttributeError).
        self.robot_x = 0.0
        self.robot_y = 0.0
        self.robot_z = 0.0
        # Distance to TARGET_POSITION, updated in get_reward(); start "far"
        # so solved() is safe to call before the first reward is computed.
        self.distance = float("inf")

        self.episodeScore = 0  # Score accumulated during an episode
        self.episodeScoreList = []  # All episode scores, used to check if task is solved
        self.test = False  # Whether the agent is in test mode

    def get_observations(self):
        """
        Build the observation vector from the message sent by the robot.

        The message is expected to hold 14 float values (8 motor values,
        3 GPS coordinates, 3 IMU values); the GPS part (indices 8-10) is
        cached as the robot position for the reward and termination checks.

        :return: Observation vector of 14 floats (zero vector while no
                 message has been received yet)
        :rtype: list
        """
        self.messageReceived = self.handle_receiver()  # Update message received from robot
        if self.messageReceived is not None:
            message = [float(value) for value in self.messageReceived]
        else:
            # No message yet (e.g. right after a reset): fall back to zeros,
            # sized from the declared observation space instead of hard-coding 14.
            message = [0.0] * self.observationSpace[0]

        # Cache the GPS position for get_reward()/is_done().
        self.robot_x = message[8]
        self.robot_y = message[9]
        self.robot_z = message[10]

        return message

    def get_reward(self, action=None):
        """
        Reward combines the robot's height (y) with its proximity to the
        fixed target position.

        :param action: Ignored; kept for interface compatibility
        :return: 0.2 * robot_y + 0.8 / distance_to_target
        :rtype: float
        """
        current_position = (self.robot_x, self.robot_y, self.robot_z)
        self.distance = math.sqrt(
            sum((a - b) ** 2 for a, b in zip(current_position, self.TARGET_POSITION))
        )
        # Guard against division by zero when the robot sits exactly on the target.
        proximity = 1.0 / max(self.distance, 1e-6)
        return 0.2 * self.robot_y + 0.8 * proximity

    def is_done(self):
        """
        An episode is done when the accumulated score exceeds 500.0 or the
        robot's y position rises above 0.6.

        :return: True if termination conditions are met, False otherwise
        :rtype: bool
        """
        if self.episodeScore > 500.0:
            return True
        if self.robot_y > 0.6:
            return True
        return False

    def reset(self):
        """
        Reset calls respawnRobot() method and returns starting observation.

        :return: Starting observation zero vector
        :rtype: list
        """
        # TODO This method will change in Webots R2020a rev2, to a general reset simulation method
        self.respawnRobot()
        return [0.0 for _ in range(self.observationSpace[0])]

    def respawnRobot(self):
        """
        Reload the saved robot in its initial state from disk.
        """
        # TODO This method will be removed in Webots R2020a rev2
        if self.robot is not None:
            # Despawn the existing robot first
            self.robot.remove()

        # Respawn robot in starting position and state
        rootNode = self.supervisor.getRoot()  # This gets the root of the scene tree
        childrenField = rootNode.getField('children')  # This gets a list of all the children, ie. objects of the scene
        childrenField.importMFNode(-2, "Robot_1.wbo")  # Load robot from file and add to second-to-last position

        # Get the new robot reference
        self.robot = self.supervisor.getFromDef("Kcg_robot")
        # Reset the simulation physics to start over
        self.supervisor.simulationResetPhysics()

    def get_info(self):
        """
        Dummy implementation of get_info.

        :return: None
        :rtype: None
        """
        return None

    def solved(self):
        """
        The task counts as solved when the robot is within 0.4 of the target
        position (distance computed in get_reward()).

        :return: True if task is solved, False otherwise
        :rtype: bool
        """
        return self.distance < 0.4