import numpy as np
import torch
import time
from typing import Any, Mapping, Sequence, Tuple
from rsl_rl.algorithms import PPO
from rsl_rl.modules import ActorCritic


class PolicyController:
    """Inference-time wrapper around a pre-trained PPO height-control policy.

    Loads actor-critic weights from ``model_path``, assembles a 3-element
    observation (body height, scaled vertical acceleration, height error
    relative to ``desired_height``) and produces a single action clipped
    to ``[-1, 1]``, optionally decimated to run the network less often
    than the control loop.
    """

    def __init__(
            self,
            cfg: Any,
            desired_height: float,
            model_path: str,
            device: torch.device
    ):
        """Build the policy network and load its checkpoint.

        Args:
            cfg: opaque configuration object (stored, not read here).
            desired_height: target body height the policy regulates to.
            model_path: path to a checkpoint containing
                ``model_state_dict``, ``optimizer_state_dict`` and ``iter``.
            device: torch device the network and buffers live on.
        """
        self._cfg = cfg
        self.desired_height = desired_height

        self.model_path = model_path
        self.device = device

        # Buffers holding the latest robot state / network I/O.
        # NOTE(review): initialized with a leading batch dim (1, N), but
        # update()/_compute_policy_obs() later overwrite them with 1-D
        # tensors -- confirm which shape act_inference actually expects.
        self.base_acc_z = torch.zeros(1, 1, dtype=torch.float32, device=self.device, requires_grad=False)
        self.height = torch.zeros(1, 1, dtype=torch.float32, device=self.device, requires_grad=False)
        self.distance = torch.zeros(1, 1, dtype=torch.float32, device=self.device, requires_grad=False)
        self.observations = torch.zeros(1, 3, dtype=torch.float32, device=self.device, requires_grad=False)
        self.actions = torch.zeros(1, 1, dtype=torch.float32, device=self.device, requires_grad=False)

        # Policy network: 3 observations -> 1 action, one 50-unit hidden layer.
        self.actor_critic = ActorCritic(num_actor_obs=3,
                            num_critic_obs=3,
                            num_actions=1,
                            actor_hidden_dims=[50],
                            critic_hidden_dims=[50],
                            activation='sigmoid',
                            init_noise_std=1.0).to(self.device)

        # PPO is constructed only so the checkpoint's optimizer state can
        # be restored; no training happens in this controller.
        self.alg = PPO(actor_critic=self.actor_critic,
                       num_learning_epochs=5,
                       num_mini_batches=4,
                       clip_param=0.2,
                       gamma=0.99,
                       lam=0.95,
                       value_loss_coef=1.0,
                       entropy_coef=0.01,
                       learning_rate=1e-3,
                       max_grad_norm=1.0,
                       use_clipped_value_loss=True,
                       schedule="adaptive",
                       desired_kl=0.01,
                       device=self.device
                       )

        # Load policy net weights.
        # NOTE(review): torch.load unpickles arbitrary objects -- only load
        # checkpoints from trusted sources.
        loaded_dict = torch.load(self.model_path, map_location=torch.device('cpu'))
        self.alg.actor_critic.load_state_dict(loaded_dict['model_state_dict'])
        self.alg.optimizer.load_state_dict(loaded_dict['optimizer_state_dict'])
        self.current_learning_iteration = loaded_dict['iter']
        print('Successfully load: ', self.model_path)

        # Evaluation mode (disables dropout/batch-norm updates); gradient
        # tracking is disabled separately in _policy_step.
        self.alg.actor_critic.eval()
        self.alg.actor_critic.to(self.device)
        self.policy = self.alg.actor_critic.act_inference

        # Run the network every `control_decimation`-th call to update().
        self.control_decimation = 1
        self.control_step_cnt = 0
        self.action_scale = 1.

    def reset(self):
        """Reset controller state (not implemented)."""
        raise NotImplementedError

    def update(self, height, accZ):
        """Refresh the observation and, on decimated steps, run the policy.

        Args:
            height: measured body height (scalar).
            accZ: measured vertical base acceleration (scalar).
        """
        self.observations = self._compute_policy_obs(height, accZ)

        # Run inference only every `control_decimation` calls; between
        # those calls the previously computed action is held.
        if self.control_step_cnt % self.control_decimation == 0:
            policy_outputs = self._policy_step(self.observations)
            self.actions = torch.clip(policy_outputs, -1, 1)
        self.control_step_cnt = (self.control_step_cnt + 1) % self.control_decimation

    def get_action(self):
        """Return the most recent clipped action scaled by ``action_scale``."""
        return self.actions * self.action_scale

    def _compute_policy_obs(self, height, accZ):
        """Assemble the observation vector.

        Layout (1-D float32 tensor on ``self.device``):
            [0] body height       (scale 1.0)
            [1] base z-acceleration (scale 0.1)
            [2] height error = height - desired_height (scale 1.0)

        Returns:
            torch.Tensor of shape (3,).
        """
        # Build directly on-device; the previous numpy round-trip added
        # nothing over torch.tensor on a scalar.
        self.height = torch.tensor([height], dtype=torch.float32, device=self.device)
        self.base_acc_z = torch.tensor([accZ], dtype=torch.float32, device=self.device)
        self.distance = self.height - self.desired_height

        # Per-term scaling -- presumably matches the scales used during
        # training; confirm against the training config.
        observations = torch.cat((self.height * 1.0,
                                  self.base_acc_z * 0.1,
                                  self.distance * 1.0,
                                  ), dim=-1)
        return observations

    def _policy_step(self, observation):
        """Run one forward pass of the policy network.

        Wrapped in torch.no_grad() so inference does not build an autograd
        graph (``eval()`` alone does not disable gradient tracking).
        """
        with torch.no_grad():
            actions = self.policy(observation)
        return actions
