# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import numpy as np
import torch
from scipy.stats import ks_2samp

import mbrl.models.util as model_util
import mbrl.types
import mbrl.util.math
from mbrl.util.replay_buffer import BootstrapIterator, TransitionIterator

from .model import Ensemble, Model
from .parallel_gaussian_mlp import ParallelGaussianMLP

# Columns emitted while training the model: each entry is
# (metric key, abbreviated column header, value type).
# NOTE(review): presumably consumed by mbrl's Logger to lay out its training
# table — confirm against the training loop that registers this format.
MODEL_LOG_FORMAT = [
    ("train_iteration", "I", "int"),
    ("epoch", "E", "int"),
    ("train_dataset_size", "TD", "int"),
    ("val_dataset_size", "VD", "int"),
    ("model_loss", "MLOSS", "float"),
    ("model_score", "MSCORE", "float"),
    ("model_val_score", "MVSCORE", "float"),
    ("model_best_val_score", "MBVSCORE", "float"),
]


class CausalDiscoveryModel(Model):
    """Wrapper around a :class:`ParallelGaussianMLP` used for causal discovery.

    The wrapper mirrors the usual one-dimensional transition-model wrapper:
    it builds model inputs/targets from transition batches (optionally
    normalizing inputs and predicting observation deltas) and delegates
    ``loss``/``update``/``eval_score`` to the wrapped model.  On top of that,
    :meth:`causal_discovery` compares the predictive distributions of the
    parallel sub-models to estimate a causal adjacency mask between model
    inputs and outputs.  Sub-model ``j`` is presumed to see the inputs with
    input ``j`` masked out, while the last sub-model (index ``-1``) sees all
    inputs — confirm against ``ParallelGaussianMLP``.

    Args:
        model (ParallelGaussianMLP): the underlying parallel ensemble model.
        time_step (float): delta targets are divided by this value.
        target_is_delta (bool): if True, the target is ``next_obs - obs``
            (except for dimensions listed in ``no_delta_list``).
        normalize (bool): if True, model inputs are normalized with running
            statistics (see :meth:`update_normalizer`).
        normalize_double_precision (bool): if True, the normalizer keeps its
            statistics in double precision.
        learned_rewards (bool): if True, the reward is appended as the last
            target dimension.
        obs_process_fn (callable, optional): applied to observations before
            they are passed to the model.
        no_delta_list (list of int, optional): observation dimensions for
            which the raw next observation (not the delta) is the target.
    """

    def __init__(
            self,
            model: ParallelGaussianMLP,
            time_step: float = 1,
            target_is_delta: bool = True,
            normalize: bool = False,
            normalize_double_precision: bool = False,
            learned_rewards: bool = True,
            obs_process_fn: Optional[mbrl.types.ObsProcessFnType] = None,
            no_delta_list: Optional[List[int]] = None,
    ):
        super().__init__(model.device)
        self.model = model
        self.time_step = time_step
        self.input_normalizer: Optional[mbrl.util.math.Normalizer] = None
        if normalize:
            self.input_normalizer = mbrl.util.math.Normalizer(
                self.model.in_size,
                self.model.device,
                dtype=torch.double if normalize_double_precision else torch.float,
            )
        self.device = self.model.device
        self.learned_rewards = learned_rewards
        self.target_is_delta = target_is_delta
        self.no_delta_list = no_delta_list if no_delta_list else []
        self.obs_process_fn = obs_process_fn

        # Every ensemble member is treated as an elite.
        self.num_elites = self.model.num_members

    def _get_model_input(
            self,
            obs: mbrl.types.TensorType,
            action: mbrl.types.TensorType,
    ) -> torch.Tensor:
        """Builds the (optionally normalized) model input from obs and action."""
        if self.obs_process_fn:
            obs = self.obs_process_fn(obs)
        obs = model_util.to_tensor(obs).to(self.device)
        action = model_util.to_tensor(action).to(self.device)
        # Concatenate along the last dimension of ``obs``.
        model_in = torch.cat([obs, action], dim=obs.ndim - 1)
        if self.input_normalizer:
            # Normalizer lives on device; cast back to float for the model.
            model_in = self.input_normalizer.normalize(model_in).float().to(self.device)
        return model_in

    def _process_batch(
            self, batch: mbrl.types.TransitionBatch, _as_float: bool = False
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Converts a transition batch into ``(model_input, target)`` tensors.

        When ``target_is_delta`` is set, the target is the observation
        difference scaled by ``1 / time_step`` (except for dimensions in
        ``no_delta_list``, which use the raw next observation).  The reward is
        appended as the last target column when ``learned_rewards`` is set.
        """
        obs, action, next_obs, reward, _ = batch.astuple()
        if self.target_is_delta:
            target_obs = next_obs - obs
            for dim in self.no_delta_list:
                target_obs[..., dim] = next_obs[..., dim]
            # Scale deltas by the integration time step.
            target_obs = target_obs / self.time_step
        else:
            target_obs = next_obs
        target_obs = model_util.to_tensor(target_obs).to(self.device)

        model_in = self._get_model_input(obs, action)
        if self.learned_rewards:
            # ``reward.ndim`` is evaluated before the reassignment, so this
            # appends a trailing dimension (equivalent to ``unsqueeze(-1)``).
            reward = model_util.to_tensor(reward).to(self.device).unsqueeze(reward.ndim)
            target = torch.cat([target_obs, reward], dim=obs.ndim - 1)
        else:
            target = target_obs

        return model_in.float(), target.float()

    def forward(self, x: torch.Tensor, *args, **kwargs) -> Tuple[torch.Tensor, ...]:
        """Calls forward method of base model with the given input and args."""
        return self.model.forward(x, *args, **kwargs)

    def update_normalizer(self, batch: mbrl.types.TransitionBatch):
        """Updates the normalizer statistics using the batch of transition data.

        The normalizer will compute mean and standard deviation the obs and action in
        the transition. If an observation processing function has been provided, it will
        be called on ``obs`` before updating the normalizer.

        Args:
            batch (:class:`mbrl.types.TransitionBatch`): The batch of transition data.
                Only obs and action will be used, since these are the inputs to the model.
        """
        if self.input_normalizer is None:
            return
        obs, action = batch.obs, batch.act
        if obs.ndim == 1:
            # Promote a single transition to a batch of one.
            obs = obs[None, :]
            action = action[None, :]
        if self.obs_process_fn:
            obs = self.obs_process_fn(obs)
        model_in_np = np.concatenate([obs, action], axis=obs.ndim - 1)
        self.input_normalizer.update_stats(model_in_np)

    def loss(
            self,
            batch: mbrl.types.TransitionBatch,
            target: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Computes the model loss over a batch of transitions.

        This method constructs input and targets from the information in the batch,
        then calls `self.model.loss()` on them and returns the value and the metadata
        as returned by the model.

        Args:
            batch (transition batch): a batch of transition to train the model.

        Returns:
            (tensor and optional dict): as returned by `model.loss().`
        """
        assert target is None
        model_in, target = self._process_batch(batch)
        return self.model.loss(model_in, target=target)

    def update(
            self,
            batch: mbrl.types.TransitionBatch,
            optimizer: torch.optim.Optimizer,
            target: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Updates the model given a batch of transitions and an optimizer.

        Args:
            batch (transition batch): a batch of transition to train the model.
            optimizer (torch optimizer): the optimizer to use to update the model.

        Returns:
            (tensor and optional dict): as returned by `model.loss().`
        """
        assert target is None
        model_in, target = self._process_batch(batch)
        return self.model.update(model_in, optimizer, target=target)

    @staticmethod
    def guassian_kl_divergence(p_mean, p_logvar, q_mean, q_logvar):
        """Element-wise KL(p || q) between two univariate Gaussians.

        Uses the closed form
            KL(p||q) = ln(sigma_q / sigma_p)
                       + (sigma_p^2 + (mu_p - mu_q)^2) / (2 * sigma_q^2)
                       - 1/2.

        Fixed: the log-variance term previously lacked its 0.5 factor —
        ``ln(sigma_q / sigma_p) == 0.5 * (q_logvar - p_logvar)``.  (Method
        name keeps the original spelling for backward compatibility.)
        """
        return (
            0.5 * (q_logvar - p_logvar)
            + (torch.exp(p_logvar) + (p_mean - q_mean) ** 2) / (2 * torch.exp(q_logvar))
            - 0.5
        )

    def eval_score(
            self,
            batch: mbrl.types.TransitionBatch,
            target: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Evaluates the model score over a batch of transitions.

        This method constructs input and targets from the information in the batch,
        then calls `self.model.eval_score()` on them and returns the value.

        Args:
            batch (transition batch): a batch of transition to train the model.

        Returns:
            (tensor): as returned by `model.eval_score().`
        """
        assert target is None
        with torch.no_grad():
            model_in, target = self._process_batch(batch)
            return self.model.eval_score(model_in, target=target)

    def eval(
            self,
            batch: mbrl.types.TransitionBatch,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Scores every (output, input) pair for causal relevance on one batch.

        For each output ``i`` and input ``j`` it computes, per ensemble
        member:

        - the KL divergence between sub-model ``j``'s predictive Gaussian and
          the reference (last) sub-model's, averaged over the batch,
        - a two-sample Kolmogorov–Smirnov p-value between the per-sample eval
          scores of the two sub-models,
        - the relative eval-score increase caused by masking input ``j``.

        NOTE(review): assumes ``pred_mean``/``pred_logvar`` and the eval score
        are laid out as ``(parallel, ensemble, batch, out)`` — confirm against
        ``ParallelGaussianMLP``.  Also note this overrides ``eval`` with a
        required argument, so the no-argument ``nn.Module.eval()`` call no
        longer works on this wrapper — confirm that is intended.

        Returns:
            (tuple of tensors): ``divergences``, ``relative_loss`` and
            ``ks_2samp_pvalue``, each of shape
            ``(out_size, in_size, ensemble_size)``.
        """
        logical_in_size = self.model.in_size
        logical_out_size = self.model.out_size

        mse_loss, meta = self.eval_score(batch)
        with torch.no_grad():
            model_in, target = self._process_batch(batch)

            # Replicate the batch for every (parallel, ensemble) sub-model.
            model_in = model_in.repeat((self.parallel_size, self.ensemble_size, 1, 1))
            pred_mean, pred_logvar = self.model.forward(model_in, use_propagation=False)

            divergences = torch.empty(
                logical_out_size, logical_in_size, self.ensemble_size, device=self.device
            )
            relative_loss = torch.empty(
                logical_out_size, logical_in_size, self.ensemble_size, device=self.device
            )
            ks_2samp_pvalue = torch.empty(
                logical_out_size, logical_in_size, self.ensemble_size, device=self.device
            )

            for i in range(logical_out_size):
                for j in range(logical_in_size):
                    # KL between sub-model j and the reference sub-model,
                    # averaged over the batch dimension (one value per member).
                    d = self.guassian_kl_divergence(
                        pred_mean[j, :, :, i],
                        pred_logvar[j, :, :, i],
                        pred_mean[-1, :, :, i],
                        pred_logvar[-1, :, :, i],
                    ).mean(dim=1)
                    divergences[i][j] = d.reshape(-1)

                    for k in range(self.ensemble_size):
                        # KS test compares the per-sample score distributions
                        # of member k under masked vs. reference sub-models.
                        d1 = mse_loss[j, :, :, i][k].reshape(-1).cpu().numpy()
                        d2 = mse_loss[-1, :, :, i][k].reshape(-1).cpu().numpy()
                        ks_2samp_pvalue[i, j, k] = ks_2samp(d1, d2).pvalue

                    # Relative score degradation when input j is masked out.
                    rel = (
                        mse_loss[j, :, :, i].mean(dim=1)
                        - mse_loss[-1, :, :, i].mean(dim=1)
                    ) / torch.abs(mse_loss[j, :, :, i].mean(dim=1)) ** 0.5
                    relative_loss[i][j] = rel.reshape(-1)

        return divergences, relative_loss, ks_2samp_pvalue

    def get_output_and_targets(
            self, batch: mbrl.types.TransitionBatch
    ) -> Tuple[Tuple[torch.Tensor, ...], torch.Tensor]:
        """Returns the model output and the target tensors given a batch of transitions.

        This method constructs input and targets from the information in the batch,
        then calls `self.model.forward()` on them and returns the value.
        No gradient information will be kept.

        Args:
            batch (transition batch): a batch of transition to train the model.

        Returns:
            (tuple(tensor), tensor): the model outputs and the target for this batch.
        """
        with torch.no_grad():
            model_in, target = self._process_batch(batch)
            output = self.model.forward(model_in)
        return output, target

    def save(self, save_dir: Union[str, pathlib.Path]):
        """Saves the wrapped model (and normalizer, if any) to ``save_dir``."""
        self.model.save(save_dir)
        if self.input_normalizer:
            self.input_normalizer.save(save_dir)

    def load(self, load_dir: Union[str, pathlib.Path]):
        """Loads the wrapped model (and normalizer, if any) from ``load_dir``."""
        self.model.load(load_dir)
        if self.input_normalizer:
            self.input_normalizer.load(load_dir)

    def causal_discovery(self, dataset):
        """Estimates a causal mask of shape ``(out_size, in_size)``.

        Runs :meth:`eval` over the whole dataset, concatenates the per-batch
        statistics along the last (ensemble) dimension, and thresholds the
        relative-loss increase to decide which inputs matter for each output.

        NOTE(review): the KL divergences and KS p-values are computed but only
        the relative-loss mask is used, and the 0.02 cutoff is a magic number
        — confirm both choices.  Bootstrapping is toggled (presumably off) so
        every member sees the same data — confirm the iterator's initial state.

        Args:
            dataset (TransitionIterator): iterator over transition batches.

        Returns:
            (tensor): per (output, input) pair, the fraction of votes whose
            relative loss exceeds the threshold.
        """
        if isinstance(dataset, BootstrapIterator):
            dataset.toggle_bootstrap()

        divergence_list = []
        relative_loss_list = []
        ks_2samp_pvalue_list = []
        for batch in dataset:
            divergence, relative_loss, ks_2samp_pvalue = self.eval(batch)
            divergence_list.append(divergence)
            relative_loss_list.append(relative_loss)
            ks_2samp_pvalue_list.append(ks_2samp_pvalue)
        divergences = torch.cat(divergence_list, dim=-1)
        relative_losses = torch.cat(relative_loss_list, dim=-1)
        ks_2samp_pvalues = torch.cat(ks_2samp_pvalue_list, dim=-1)

        torch.set_printoptions(precision=4, sci_mode=False)
        l_mask = self.threhshold(relative_losses, 0.02)
        print("l_mask:\n", l_mask)

        mask = l_mask

        return mask

    def median_threhshold(self, tensor):
        """Per-row mask of entries above that row's median, averaged over the
        last dimension.  (Name keeps the original spelling for compatibility.)
        """
        mask = torch.zeros(tensor.shape, device=self.device)
        for i in range(len(tensor)):
            m = torch.median(tensor[i])
            mask[i][tensor[i] > m] = 1
        return mask.mean(-1)

    def threhshold(self, tensor, t):
        """Fraction of entries along the last dimension strictly above ``t``.
        (Name keeps the original spelling for compatibility.)
        """
        mask = torch.zeros(tensor.shape, device=self.device)
        mask[tensor > t] = 1
        return mask.mean(-1)

    def set_elite(self, elite_indices: Sequence[int]):
        """Forwards the elite selection to the wrapped model."""
        self.model.set_elite(elite_indices)

    @property
    def ensemble_size(self):
        """Number of ensemble members in the wrapped model."""
        return self.model.num_members

    @property
    def parallel_size(self):
        """Number of parallel sub-models in the wrapped model.

        NOTE(review): returns 0 (not 1) when the wrapped model has no
        ``parallel_size`` attribute; :meth:`eval` would then repeat the batch
        zero times — confirm this sentinel is intended.
        """
        if hasattr(self.model, "parallel_size"):
            return self.model.parallel_size
        else:
            return 0

    def set_propagation_method(self, propagation_method: Optional[str] = None):
        """Sets the propagation method if the wrapped model is an Ensemble."""
        if isinstance(self.model, Ensemble):
            self.model.set_propagation_method(propagation_method)
