from copy import deepcopy
import os
import StreamLearn.Algorithm.TTA.iabn as iabn
import StreamLearn.Algorithm.TTA.memory as memory
import torch
import torch.nn as nn
import torch.jit
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from   scipy.optimize import fsolve
from   StreamLearn.Base.SemiEstimator import StreamEstimator

__all__ = ["ODS"]

class LabelDistributionQueue:
    """Fixed-size circular buffer of recently predicted labels.

    Used to estimate the (sliding-window) label distribution of the
    incoming test stream.
    """

    def __init__(self, num_class, capacity=None):
        # Default capacity: 20 slots per class.
        capacity = num_class * 20 if capacity is None else capacity
        self.queue_length = capacity
        self.queue = torch.zeros(self.queue_length)
        self.pointer = 0
        self.num_class = num_class
        self.size = 0  # total number of labels ever pushed

    def update(self, tgt_preds):
        """Push a batch of predicted labels, overwriting the oldest entries.

        Labels are stored shifted by +1 so that value 0 marks an empty slot.
        """
        shifted = tgt_preds.detach().cpu() + 1
        batch = shifted.shape[0]
        self.size += batch
        end = self.pointer + batch
        if end > self.queue_length:
            # Wrap around when capacity is not a multiple of the batch size:
            # fill the tail, then continue from the front.
            tail = self.queue_length - self.pointer
            self.queue[self.pointer:] = shifted[:tail]
            self.queue[: batch - tail] = shifted[tail:]
        else:
            self.queue[self.pointer:end] = shifted
        self.pointer = end % self.queue_length

    def get(self):
        """Per-class frequencies over the buffer (all-ones if buffer empty)."""
        counts = torch.bincount(self.queue.long(), minlength=self.num_class + 1)
        freqs = counts.float() / self.queue_length
        freqs = freqs[1:]  # drop the "empty slot" bin (stored labels are +1 shifted)
        if freqs.sum() == 0:
            freqs[:] = 1
        return freqs

    def full(self):
        """True once at least `queue_length` labels have been pushed."""
        return self.size >= self.queue_length

class AffinityMatrix:
    """Base class for pairwise affinity (kernel) matrix builders."""

    def __init__(self, **kwargs):
        pass

    def __call__(self, X, **kwargs):
        # BUG FIX: the original signature was `__call__(X, **kwargs)` —
        # missing `self`, so the input tensor silently bound to the instance.
        raise NotImplementedError

    def is_psd(self, mat):
        """Return (sorted eigenvalues, 1.0/0.0 flag) indicating whether
        `mat` is symmetric positive semi-definite."""
        # BUG FIX: torch.eig was removed from PyTorch; torch.linalg.eigvals
        # is the supported replacement (real parts, as the original used).
        eigenvalues = torch.linalg.eigvals(mat).real.sort(descending=True)[0]
        return eigenvalues, float((mat == mat.t()).all() and (eigenvalues >= 0).all())

    def symmetrize(self, mat):
        """Return the symmetric part (mat + mat^T) / 2."""
        return 1 / 2 * (mat + mat.t())

class rbf_affinity(AffinityMatrix):
    """RBF kernel whose bandwidth is set from the mean k-NN distance."""

    def __init__(self, sigma: float, **kwargs):
        # `sigma` is kept for interface compatibility; the bandwidth actually
        # used is data-driven (mean distance to the k-th neighbour).
        self.sigma = sigma
        self.k = kwargs['knn']

    def __call__(self, X):
        num_points = X.size(0)
        # Full pairwise Euclidean distance matrix, [N, N].
        dist = torch.norm(X.unsqueeze(0) - X.unsqueeze(1), dim=-1, p=2)
        neighbours = min(self.k, num_points)
        # Distance to each point's k-th nearest neighbour (self included).
        kth_dist = dist.topk(k=neighbours, dim=-1, largest=False).values[:, -1]
        bandwidth = kth_dist.mean()
        return torch.exp(-dist.pow(2) / (2 * bandwidth ** 2))

def entropy_energy(Y, unary, pairwise, bound_lambda):
    """Scalar free energy of soft assignment `Y`: unary cost, minus
    lambda-weighted pairwise agreement, plus negative entropy."""
    unary_term = unary * Y
    pairwise_term = bound_lambda * pairwise * Y
    entropy_term = Y * torch.log(Y.clip(1e-20))  # clip avoids log(0)
    return (unary_term - pairwise_term + entropy_term).sum()

def laplacian_optimization(unary, kernel, bound_lambda=1, max_steps=100):
    """Iteratively minimise the Laplacian-regularised free energy.

    Alternates a soft-assignment update with an energy evaluation and
    stops early once the relative energy change becomes negligible.

    NOTE(review): `bound_lambda` is applied to `pairwise` both here and
    again inside entropy_energy; inert at the default lambda=1 — confirm
    intent before calling with other values.
    """
    energies = []
    prev_energy = float('inf')
    Y = (-unary).softmax(-1)  # initial soft assignment, [N, K]
    for step in range(max_steps):
        pairwise = bound_lambda * kernel.matmul(Y)  # neighbour agreement, [N, K]
        Y = (pairwise - unary).softmax(-1)
        energy = entropy_energy(Y, unary, pairwise, bound_lambda).item()
        energies.append(energy)
        # Relative convergence test; skip the first couple of iterations.
        if step > 1 and abs(energy - prev_energy) <= 1e-8 * abs(prev_energy):
            break
        prev_energy = energy
    return Y

def LAME_optimize(logits, feats, knn, force_symmetry=True):
    """Refine `logits` with LAME: Laplacian-regularised adjustment using an
    RBF affinity built over L2-normalised features.

    :param logits: [N, K] classifier outputs.
    :param feats: [N, d] features used to build the affinity graph.
    :param knn: number of neighbours for the RBF bandwidth.
    :param force_symmetry: symmetrise the kernel before optimisation.
    :return: [N, K] refined soft assignments.
    """
    # BUG FIX: replaced the pointless `eval(f'rbf_affinity')` indirection
    # with a direct constructor call (same behavior, no eval).
    affinity = rbf_affinity(sigma=1.0, knn=knn)
    probas = F.softmax(logits, 1)
    # --- Get unary term and kernel ---
    unary = - torch.log(probas + 1e-10)  # [N, K] negative log-likelihood
    feats = F.normalize(feats, p=2, dim=-1)  # [N, d]
    kernel = affinity(feats)  # [N, N]
    if force_symmetry:
        kernel = 1 / 2 * (kernel + kernel.t())
    # --- Perform optim ---
    Y = laplacian_optimization(unary, kernel)
    return Y

class ODS(StreamEstimator):
    """Online test-time adaptation with distribution-aware sample storage.

    Keeps a class-balanced reservoir of test samples (pseudo-labelled by
    the model), refines each batch's predictions with LAME against a frozen
    base copy, and adapts the normalisation parameters by minimising a
    label-distribution-weighted entropy over the stored samples.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args

        model = args.model
        self.base = deepcopy(args.model)  # frozen copy for LAME's unary term
        self.model = configure_model(model, args.optim.bn_momentum, args.use_learnt_stats)
        self.optimizer = optim.Adam(
            model.parameters(),
            lr=float(args.optim.lr),
            weight_decay=float(args.optim.wd)
        )
        self.num_class = args.num_class
        self.capacity = args.capacity
        self.reset()

        # Reservoir of stored inputs and their pseudo-labels.
        self.memory = None         # tensor [n, ...] of stored inputs (CPU)
        self.memory_labels = None  # list[int] pseudo-labels, parallel to memory
        self.mem = memory.PBRS(capacity=args.capacity, num_class=args.num_class)
        self.queue_dist = LabelDistributionQueue(num_class=args.num_class, capacity=args.capacity)
        # memory_position[c]: slots in `memory` currently holding class c.
        self.memory_position = [[] for i in range(self.num_class)]
        # memory_counts[c]: total number of class-c samples ever seen.
        self.memory_counts = [0 for i in range(self.num_class)]
        self.random = np.random.default_rng(seed=0)

        self.exp_info = []

    def storage_sample(self, x, y):
        """Try to insert sample `x` with pseudo-label `y` into the reservoir.

        :return: True if stored, False if rejected by reservoir sampling.
        """
        cls = int(y)
        if self.memory is None:
            self.memory = x.unsqueeze(0)
            self.memory_labels = [cls]
            # BUG FIX: the first sample's slot (index 0) was never recorded
            # in memory_position, so it could never be evicted and the
            # per-class slot counts were off by one for its class.
            self.memory_position[cls].append(0)
            return True

        if self.memory.shape[0] < self.capacity:
            # Reservoir not full yet: always store.
            self.memory = torch.cat([self.memory, x.unsqueeze(0)])
            self.memory_labels.append(cls)
            self.memory_position[cls].append(self.memory.shape[0] - 1)
            return True

        # Full reservoir: evict from the most-represented class(es).
        siz = [len(arr) for arr in self.memory_position]
        mx_siz = np.max(siz)
        mx_cls = [i for i in range(self.num_class) if mx_siz == len(self.memory_position[i])]
        if cls not in mx_cls:
            # Minority class: replace a random sample of a majority class.
            rep_cls = self.random.choice(mx_cls)
            rep_pos = self.random.choice(self.memory_position[rep_cls])
            self.memory_position[rep_cls].remove(rep_pos)
            self.memory[rep_pos, ...] = x
            self.memory_labels[rep_pos] = cls
            self.memory_position[cls].append(rep_pos)
            return True
        else:
            # Majority class: reservoir-sample within the class so stored
            # samples remain uniform over all class-cls samples ever seen.
            index = self.random.integers(low=0, high=self.memory_counts[cls], endpoint=False)
            if index >= siz[cls]:
                return False  # rejected
            rep_pos = self.memory_position[cls][index]
            self.memory[rep_pos, ...] = x
            self.memory_labels[rep_pos] = cls
            return True

    def storage(self, x, outputs, domain):
        """Pseudo-label batch `x` with `outputs` and push every sample into
        the reservoir. `domain` is accepted for interface compatibility."""
        assert (x.shape[0] == outputs.shape[0])
        x = x.detach().cpu()
        outputs = outputs.detach().cpu()
        predicts = outputs.max(1)[1]
        for i in range(x.shape[0]):
            y = predicts[i]
            self.memory_counts[int(y)] += 1
            self.storage_sample(x[i, ...], y)

    def sample(self, batch_sample):
        """Return the whole reservoir as (inputs on GPU, label list), or
        None if nothing has been stored yet."""
        if self.memory is None:
            return None
        return self.memory.cuda(), self.memory_labels

    def forward(self, x):
        """Predict on batch `x` while adapting the model in the same step."""
        outputs = self.forward_and_adapt(x, self.model, self.optimizer)
        return outputs

    def reset(self):
        """No per-episode state to reset."""
        pass

    @torch.enable_grad()  # ensure grads in possible no grad context for testing
    def forward_and_adapt(self, x, model, optimizer):
        """One test-time step: predict (refined by LAME), store the batch,
        then take one weighted-entropy adaptation step on the reservoir."""
        # forward
        with torch.no_grad():
            # Get model output & features
            model.eval()
            outputs = model(x)
            model_feats = extract_feats(model)
            base_logits = self.base(x)
            # LAME refinement of the frozen base model's predictions.
            optim_dist = LAME_optimize(base_logits, model_feats, self.args.optim.knn)
            # Geometric mean of the adapted and LAME-refined distributions.
            probas = torch.sqrt(F.softmax(outputs, 1) * optim_dist)
            unary = - torch.log(probas + 1e-10)
            outputs = (-unary).softmax(-1)
            self.storage(x, outputs, None)
        self.save()

        model.train()
        # BUG FIX: sample() returns None before anything is stored; the
        # original unpacked it unconditionally (TypeError) and only checked
        # `feats is not None` afterwards, when it was too late.
        batch = self.sample(x.cuda())
        if batch is not None:
            feats, labels = batch
            self.queue_dist.update(torch.tensor(labels))
            # Up-weight rare classes: weight ~ (1.1 - frequency), normalised.
            weight = 1.0 - self.queue_dist.get() + 0.1
            weight = weight / weight.sum()
            weight = weight[labels].cuda()
            loss = softmax_entropy(model(feats)) * weight
            loss = loss.sum(0) / weight.sum(0)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        return outputs

    def save(self, ):
        """Hook for checkpointing; intentionally a no-op."""
        pass

    def print(self, ):
        """Hook for logging; intentionally a no-op."""
        pass

    def fit(self, stream_dataset):
        raise NotImplementedError("Test-time adaptation methods do not support fit method.")

    def predict(self, X):
        """
        Predict y for input X.

        :param X: input.
        """
        return self.forward(X)

    def evaluate(self, y_pred, y_true):
        """
        Evaluate stream algorithm on a stream dataset.

        :param y_pred: predict y.
        :param y_true: ground-truth y.
        """
        accs = torch.sum(y_pred == y_true).item() / y_true.size(0)
        return accs

def extract_feats(model):
    """Spatially average-pool the feature map cached on `model` (or on
    `model.module` when the network is wrapped, e.g. by DataParallel)."""
    holder = model if hasattr(model, "feats") else model.module
    return holder.feats.mean((-2, -1))

@torch.jit.script
def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
    """Per-sample Shannon entropy of the softmax over logits `x` (dim 1)."""
    probs = x.softmax(1)
    log_probs = x.log_softmax(1)
    return -(probs * log_probs).sum(1)

def configure_model(model, bn_momentum, use_learnt_stats):
    """Prepare `model` for test-time adaptation: freeze everything, then
    re-enable gradients only on normalisation layers (BatchNorm affine
    parameters and all IABN parameters), optionally switching BatchNorm
    to test-batch statistics."""
    for p in model.parameters():  # start from a fully frozen network
        p.requires_grad = False
    for layer in model.modules():
        if isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
            if use_learnt_stats:
                layer.track_running_stats = True
                layer.momentum = bn_momentum
            else:
                # Clearing the running stats makes the layer normalise with
                # the current test batch's statistics (no momentum).
                layer.track_running_stats = False
                layer.running_mean = None
                layer.running_var = None
            layer.weight.requires_grad_(True)
            layer.bias.requires_grad_(True)
        if isinstance(layer, (iabn.InstanceAwareBatchNorm2d, iabn.InstanceAwareBatchNorm1d)):
            for p in layer.parameters():
                p.requires_grad = True
    return model

def setup(model, args):
    """Build an ODS estimator around `model` from the hyper-parameters in
    `args`, and return it ready for test-time adaptation.

    BUG FIX: the original called ODS(args, base, model, optimizer,
    num_class=..., capacity=...), but ODS.__init__ only accepts `args`,
    so this function always raised TypeError. ODS builds its own frozen
    base copy, configured model and optimizer from `args`, so we only
    need to make sure `args.model` carries the supplied model.
    """
    args.model = model
    # NOTE(review): ODS reads args.capacity while this helper historically
    # received args.queue_size — confirm the two fields agree upstream.
    ods_model = ODS(args)
    ods_model.reset()
    return ods_model