# George Chia
import logging
import torch
import numpy as np
from numpy.linalg import norm
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_sim.envs.utils.state import tensor_to_joint_state
from crowd_sim.envs.utils.utils import point_to_segment_dist
from crowd_nav.policy.graph_model import RGL, DVN
from crowd_nav.policy.value_estimator import ValueEstimator_garl
from crowd_nav.policy.model_predictive_rl import ModelPredictiveRL
from crowd_sim.envs.utils.state import ObservableState, FullState
from crowd_sim.envs.utils.state import JointState

class GARL(ModelPredictiveRL):
    """Graph-attention RL policy with one-step environment lookahead.

    Unlike the parent ``ModelPredictiveRL``, no learned state predictor is
    used: the humans' next states are obtained directly from the environment
    via ``env.onestep_lookahead``.  Each candidate action is scored as
    ``reward + gamma ** (dt * v_pref) * V(next_state)`` and the argmax is
    returned.
    """

    def __init__(self):
        super().__init__()
        self.name = 'GARL'

    def configure(self, config):
        """Build the graph model and value estimator from ``config``.

        Reads the shared RL/action-space hyper-parameters, the
        action-clipping switch, and the optional ``sparse_search`` and
        ``planning_width`` settings from ``config.garl``.
        """
        self.set_common_parameters(config)
        self.do_action_clip = config.garl.do_action_clip
        if hasattr(config.garl, 'sparse_search'):
            self.sparse_search = config.garl.sparse_search
        # predict() dereferences self.planning_width when do_action_clip is
        # enabled, so read it from the config when present (previously this
        # line was commented out, leaving the attribute potentially unset).
        if hasattr(config.garl, 'planning_width'):
            self.planning_width = config.garl.planning_width
        graph_model = DVN(config, self.robot_state_dim, self.human_state_dim)
        self.value_estimator = ValueEstimator_garl(config, graph_model)
        self.model = [graph_model, self.value_estimator.value_network]

    def set_common_parameters(self, config):
        """Copy the RL and action-space hyper-parameters onto the policy."""
        self.gamma = config.rl.gamma
        self.kinematics = config.action_space.kinematics
        self.sampling = config.action_space.sampling
        self.speed_samples = config.action_space.speed_samples
        self.rotation_samples = config.action_space.rotation_samples
        self.rotation_constraint = config.action_space.rotation_constraint

    def get_state_dict(self):
        """Return the learnable weights (graph model + value head) for checkpointing."""
        return {
            'graph_model': self.value_estimator.graph_model.state_dict(),
            'value_network': self.value_estimator.value_network.state_dict()
        }

    def load_state_dict_rl(self, state_dict):
        """Restore weights previously produced by :meth:`get_state_dict`."""
        self.value_estimator.graph_model.load_state_dict(state_dict['graph_model'])
        self.value_estimator.value_network.load_state_dict(state_dict['value_network'])

    def predict(self, state):
        """Select an action for ``state`` via one-step lookahead.

        In the training phase an epsilon-greedy random action may be chosen;
        otherwise every (optionally clipped) action in the action space is
        propagated one step, the environment is queried for the resulting
        human states and reward, and the value-maximizing action is returned.

        Raises:
            AttributeError: if phase/device (or epsilon while training) are unset.
            ValueError: if no action scores above -inf (untrained value network).
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')

        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            self.build_action_space(state.robot_state.v_pref)

        if self.phase == 'train' and np.random.random() < self.epsilon:
            # Epsilon-greedy exploration: uniform random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            max_action = None
            max_value = float('-inf')

            if self.do_action_clip:
                state_tensor = state.to_tensor(add_batch_size=True, device=self.device)
                action_space_clipped = self.action_clip(state_tensor, self.action_space, self.planning_width)
            else:
                # No Monte-Carlo tree search is performed here, so clipping
                # is typically disabled and the full action space is scanned.
                action_space_clipped = self.action_space

            for action in action_space_clipped:
                next_self_state = self.propagate(state.robot_state, action)
                # Ask the environment directly for the humans' next states
                # instead of running a learned state predictor.
                next_human_states, reward, done, info = self.env.onestep_lookahead(action)
                next_state = JointState(next_self_state, next_human_states)
                next_state_tensor = next_state.to_tensor(add_batch_size=True, device=self.device)
                next_state_value = self.value_estimator(next_state_tensor).data.item()
                # Discount exponent scaled by dt * v_pref (CrowdNav convention).
                value = reward + pow(self.gamma, self.time_step * state.robot_state.v_pref) * next_state_value
                if value > max_value:
                    max_value = value
                    max_action = action
            if max_action is None:
                raise ValueError('Value network is not well trained. ')

        if self.phase == 'train':
            self.last_state = self.transform(state)

        return max_action

    def propagate(self, state, action):
        """Advance ``state`` one time step assuming ``action`` is held constant.

        An ``ObservableState`` (a human) is always integrated holonomically;
        a ``FullState`` (the robot) follows ``self.kinematics``.

        Raises:
            ValueError: if ``state`` is neither ObservableState nor FullState.
        """
        if isinstance(state, ObservableState):
            # Propagate a human's state with constant velocity.
            next_px = state.px + action.vx * self.time_step
            next_py = state.py + action.vy * self.time_step
            next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # Propagate the robot's state; perform action without rotation
            # in the holonomic case.
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step
                next_py = state.py + action.vy * self.time_step
                next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
                                       state.gx, state.gy, state.v_pref, state.theta)
            else:
                # Unicycle kinematics: rotate first, then translate along
                # the new heading.
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step
                next_py = state.py + next_vy * self.time_step
                next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
                                       state.v_pref, next_theta)
        else:
            raise ValueError('Type error')

        return next_state

    def set_time_step(self, time_step):
        """Set the simulation time step (seconds) used by propagate/predict."""
        self.time_step = time_step