from distutils.log import error
from logging import warning
import numpy
import scvi
from scvi.model._scvi import SCVI
from scvi._compat import Literal
from typing import List, Optional
from scvi.utils import setup_anndata_dsp
from scvi.dataloaders import DataSplitter
from scvi.data.fields import (
    CategoricalJointObsField,
    CategoricalObsField,
    LayerField,
    NumericalJointObsField,
    NumericalObsField,
)
from scvi.data import AnnDataManager
from scvi import REGISTRY_KEYS
from anndata import AnnData
from scvi.module.base import auto_move_data
import torch


def kl_guassian(mu1, mu2, sigma1, sigma2):
    """
    Compute the summed KL divergence KL(N(mu1, sigma1) || N(mu2, sigma2))
    between two diagonal Gaussian distributions.

    ``sigma1`` and ``sigma2`` are **variances** (scvi's ``qz_v`` is a
    variance), not standard deviations.

    The previous implementation mixed parameterizations: the log term treated
    the sigmas as variances while the quadratic term squared them as if they
    were standard deviations, and the -1/2 constant was dropped (so
    KL(p || p) != 0).  This is the standard closed form:

        KL = 0.5 * [ log(s2/s1) + (s1 + (m1 - m2)^2) / s2 - 1 ]

    summed over all elements.
    """
    return 0.5 * (
        torch.log(sigma2 / sigma1)
        + (sigma1 + (mu1 - mu2) ** 2) / sigma2
        - 1.0
    ).sum()


class ScSemiVI(SCVI):
    """Semi-supervised extension of scvi-tools' SCVI.

    Adds a ``fine_tune`` stage that, after standard SCVI training, adjusts the
    encoder so that latent posteriors of cells sharing a label are pulled
    toward their class statistics while different classes are pushed apart,
    using Gaussian KL divergences.
    """

    def __init__(
        self,
        adata: AnnData,
        label_key: Optional[str] = None,
        n_hidden: int = 128,
        n_latent: int = 10,
        n_layers: int = 1,
        dropout_rate: float = 0.1,
        dispersion: Literal["gene", "gene-batch",
                            "gene-label", "gene-cell"] = "gene",
        gene_likelihood: Literal["zinb", "nb", "poisson"] = "zinb",
        latent_distribution: Literal["normal", "ln"] = "normal",
        **model_kwargs,
    ):
        """
        Parameters
        ----------
        adata
            AnnData object previously registered via ``setup_anndata``.
        label_key
            Column of ``adata.obs`` holding cell labels; defaults to
            ``"cell_type"`` when ``None`` (matching :meth:`label2num`).
        n_hidden, n_latent, n_layers, dropout_rate, dispersion,
        gene_likelihood, latent_distribution, **model_kwargs
            Forwarded unchanged to :class:`scvi.model.SCVI`.
        """
        # Forward by keyword so a reordering of SCVI's signature cannot
        # silently misassign arguments.
        super().__init__(
            adata,
            n_hidden=n_hidden,
            n_latent=n_latent,
            n_layers=n_layers,
            dropout_rate=dropout_rate,
            dispersion=dispersion,
            gene_likelihood=gene_likelihood,
            latent_distribution=latent_distribution,
            **model_kwargs,
        )
        if label_key is None:
            # Same fallback as label2num; previously adata.obs[None] raised
            # an opaque KeyError here.
            warning("No label key provided, using default label key: 'cell_type'")
            label_key = "cell_type"
        if label_key not in adata.obs:
            raise KeyError("{} is not a valid label key!".format(label_key))
        # Number of distinct cell labels; sizes the per-class statistics
        # buffers allocated in fine_tune().
        self.nlabels = len(set(adata.obs[label_key]))
        self.elbo_hist = []

    @staticmethod
    def label2num(adata, label_key):
        """Encode each cell's label as an integer code.

        Codes are assigned in order of first appearance (the first distinct
        label seen becomes 0, the next 1, ...).

        Raises
        ------
        KeyError
            If ``label_key`` (after defaulting) is not a column of
            ``adata.obs``.  The previous implementation only *logged* an
            error and then crashed on ``len(None)`` below.
        """
        if label_key is None:
            warning("No label key provided, using default label key: 'cell_type'")
            label_key = "cell_type"
        cell_type = adata.obs.get(label_key, None)
        if cell_type is None:
            raise KeyError("{} is not a valid label key!".format(label_key))
        labels = numpy.zeros(len(cell_type), dtype=int) - 1
        # Single O(n) pass instead of one masked assignment per distinct
        # label; encoding (first-appearance order) is unchanged.
        code_of = {}
        for pos, t in enumerate(cell_type):
            if t not in code_of:
                code_of[t] = len(code_of)
            labels[pos] = code_of[t]
        return labels

    def fine_tune_loss(self, means: List[torch.Tensor], vars: List[torch.Tensor],
                       qz_m, qz_v, cell_type):
        """Compute the semi-supervised fine-tuning loss for one minibatch.

        Parameters
        ----------
        means, vars
            Per-class latent mean/variance buffers (length ``self.nlabels``);
            updated in place with this batch's class statistics.  Classes
            absent from the batch keep zeroed statistics.
        qz_m, qz_v
            Encoder posterior means/variances, shape ``(batch, n_latent)``.
        cell_type
            Integer class label per cell.

        Returns
        -------
        Intra-class attraction term minus a normalized inter-class
        repulsion term.
        """
        for m in means:
            m.zero_()
        for v in vars:
            v.zero_()
        cell_type = cell_type.reshape(-1)

        ctl = cell_type.unique()
        nct = len(ctl)
        intra = 0.0  # pulls each cell's posterior toward its class average
        inter = 0.0  # pushes distinct class distributions apart (negative KL)

        for i in ctl:
            idxs = cell_type == i
            qms = qz_m[idxs, :]
            qvs = qz_v[idxs, :]
            means[i] = qms.mean(0)
            vars[i] = qvs.mean(0)
            intra += kl_guassian(qms, means[i], qvs, vars[i])
        intra /= len(cell_type)

        # Symmetrized pairwise repulsion over all class pairs in the batch.
        for i in range(nct):
            for j in range(i + 1, nct):
                i1, i2 = ctl[i], ctl[j]
                inter -= kl_guassian(means[i1], means[i2], vars[i1], vars[i2]) + \
                    kl_guassian(means[i2], means[i1], vars[i2], vars[i1])
        if nct > 1:
            # Guard: a batch containing a single class previously caused a
            # ZeroDivisionError here; with one class there are no pairs and
            # inter is already 0.
            inter /= (nct - 1) * nct

        return intra + inter

    def fine_tune(self, record: bool = True,
                  batch_size=256, max_iter: int = 20,
                  lr: float = 2e-5, device: str = "cpu"):
        """Fine-tune the encoder (``z_encoder`` only) with the label-aware loss.

        Parameters
        ----------
        record
            If True, append the (negated) ELBO after each epoch to
            ``self.elbo_hist``.
        batch_size, max_iter, lr, device
            Minibatch size, number of epochs, Adam learning rate, and the
            device minibatches are moved to.
        """
        # Seed the history with the ELBO curve from the initial SCVI training.
        self.elbo_hist = []
        self.elbo_hist.extend(
            self.history['elbo_train']['elbo_train'].values.tolist())
        self.module.train()
        data_splitter = DataSplitter(
            self.adata_manager,
            train_size=0.9,
            validation_size=None,
            batch_size=batch_size,
            use_gpu=False,
        )
        data_splitter.setup()
        train_data = data_splitter.train_dataloader()
        # Only the encoder is optimized; decoder and likelihood stay frozen.
        optimizer = torch.optim.Adam(self.module.z_encoder.parameters(), lr=lr)

        means = [torch.zeros(self.module.n_latent, dtype=torch.float32)
                 for _ in range(self.nlabels)]
        vars = [torch.zeros(self.module.n_latent, dtype=torch.float32)
                for _ in range(self.nlabels)]

        for epoch in range(max_iter):
            print('Iteration: {}/{}'.format(epoch + 1, max_iter))
            for td in train_data:
                X, batch, cell_type = td['X'], td['batch'], td['labels']
                X, batch, cell_type = X.to(device), batch.to(device), cell_type.to(device)
                # long, not uint8: uint8 silently wraps past 255 classes and
                # is deprecated for tensor indexing.
                cell_type = cell_type.to(torch.long)
                out = self.inference(X, batch)
                qz_m, qz_v = out['qz_m'], out['qz_v']
                loss = self.fine_tune_loss(means, vars, qz_m, qz_v, cell_type)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            if record:
                # Switch to eval mode so ELBO evaluation is deterministic
                # (dropout off), then restore training mode.
                self.module.eval()
                self.elbo_hist.append(-self.get_elbo().item())
                self.module.train()

    @auto_move_data
    def inference(self, x, batch_index):
        """
        High level inference method.

        Runs the encoder on log1p-transformed counts and returns the
        posterior mean/variance of the latent representation.
        """
        encoder_input = torch.log(1 + x)

        categorical_input = tuple()
        qz_m, qz_v, _ = self.module.z_encoder(
            encoder_input, batch_index, *categorical_input)

        outputs = dict(qz_m=qz_m, qz_v=qz_v)
        return outputs

    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        layer: Optional[str] = None,
        batch_key: Optional[str] = None,
        labels_key: Optional[str] = None,
        size_factor_key: Optional[str] = None,
        categorical_covariate_keys: Optional[List[str]] = None,
        continuous_covariate_keys: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        %(summary)s.

        Parameters
        ----------
        %(param_layer)s
        %(param_batch_key)s
        %(param_labels_key)s
        %(param_size_factor_key)s
        %(param_cat_cov_keys)s
        %(param_cont_cov_keys)s
        """
        # NOTE: _get_setup_method_args introspects locals(); do not introduce
        # local variables before this call.
        setup_method_args = cls._get_setup_method_args(**locals())
        anndata_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key),
            CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),
            NumericalObsField(
                REGISTRY_KEYS.SIZE_FACTOR_KEY, size_factor_key, required=False
            ),
            CategoricalJointObsField(
                REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
            ),
            NumericalJointObsField(
                REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
            ),
        ]
        adata_manager = AnnDataManager(
            fields=anndata_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
