import copy
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import time
import functools

from sklearn.mixture import GaussianMixture
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA

class PLG_for_NLL_realworld:
    """Pseudo-Label Generation (PLG) for learning with noisy labels on real-world data.

    Workflow per outer training epoch:
      1. ``split_train`` - score every training sample with the prediction entropy of
         ``base_model``, fit a 2-component GMM on the scores and split the data into
         reliable / unreliable indices.
      2. ``select_unreliable_subset`` - pick the unreliable samples that are nearest
         neighbours (in embedding space) of reliable ones.
      3. ``knn_pseudo_label_generator`` or ``finetune_pseudo_label_generator`` -
         relabel the unreliable subset.
      4. ``update_soft_label`` - EMA-style merge of hard pseudo labels into the
         running soft labels.
    """

    def __init__(self, num_class, threshold, epochs, device, finetune_eps, finetune_lr, finetune_wd, finetune_scale, loss_mix_param, pl_start_epoch) -> None:
        # basic config
        self.base_model = None    # assigned later via set_basic_config
        self.train_loader = None  # assigned later via set_basic_config
        self.num_class = num_class
        self.threshold = threshold  # GMM posterior threshold for the "reliable" split
        # per-epoch decay of the threshold (reaches ~1e-4 after `epochs` epochs)
        self.delta = (threshold - 1e-4) / epochs
        self.device = device
        self.finetune_eps = finetune_eps
        self.finetune_lr = finetune_lr
        self.finetune_wd = finetune_wd
        self.finetune_scale = finetune_scale
        self.loss_mix_param = loss_mix_param
        self.pl_start_epoch = pl_start_epoch

        # history of per-sample split criteria, one tensor per split_train call
        self.split_criterion_values_arr = []
        self.correctness_arr = None

        # index bookkeeping for the reliable / unreliable split
        self.reliable_idx = None
        self.unreliable_idx = None
        self.unreliable_sub_idx = None

    def split_train(self, review=True, printlog=True):
        """Split the training set into reliable and unreliable samples.

        Scores every sample with the entropy of the base model's softmax output,
        min-max normalizes the scores, fits a two-component GMM and treats samples
        whose posterior for the low-mean (confident) component exceeds
        ``self.threshold`` as reliable.  Also caches per-sample embeddings, soft
        pseudo labels and ground-truth correctness for the later stages.

        Args:
            review: if True, average the criterion over the last 5 recorded
                epochs for stability instead of using only the current one.
            printlog: print split statistics.
        """
        print("split_train.")
        t = time.time()
        self.base_model.eval()

        num_samples = len(self.train_loader.dataset)
        split_criterion_values = torch.zeros(num_samples)
        correctness_arr = torch.zeros(num_samples, dtype=torch.bool)
        preds_arr = torch.zeros(num_samples, dtype=torch.long)

        # NOTE(review): embedding width hard-coded to 512 -- must match the
        # flattened output of base_model.last_k_layer_forward below.
        embeddings = torch.zeros((num_samples, 512))

        with torch.no_grad():
            # train loader yields: inputs (FloatTensor), targets (LongTensor),
            # index (LongTensor, dataset positions), trues (LongTensor, clean labels)
            for _, (inputs, targets, index, trues) in enumerate(self.train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.base_model(inputs)
                sm_outputs = F.softmax(outputs, dim=1)

                # split criterion: prediction entropy, -sum p * log p
                batch_split_criterion_values = - (sm_outputs * torch.log_softmax(outputs, dim=1)).sum(dim=1)
                split_criterion_values[index] = batch_split_criterion_values.clone().detach().cpu()

                batch_pseudo_soft_label = sm_outputs.clone().detach().cpu()
                self.pseudo_soft_label[index] = batch_pseudo_soft_label

                preds = batch_pseudo_soft_label.argmax(dim=1)
                correctness_arr[index] = (preds == trues).clone().detach().cpu()
                preds_arr[index] = preds

                # penultimate-layer features, flattened and cached per sample
                embedding = torch.flatten(self.base_model.last_k_layer_forward(inputs, k=-2, lin=0, lout=5), start_dim=1, end_dim=-1)
                embeddings[index] = embedding.clone().detach().cpu()

        # min-max normalize the criterion to [0, 1] before the GMM fit
        split_criterion_values = (split_criterion_values - split_criterion_values.min()) / (split_criterion_values.max() - split_criterion_values.min())
        self.split_criterion_values_arr.append(split_criterion_values)
        embeddings = F.normalize(embeddings, dim=1)
        self.embeddings = copy.deepcopy(embeddings)

        if review:
            # average loss over last 5 epochs to improve convergence stability
            history = torch.stack(self.split_criterion_values_arr)
            split_criterion_values = history[-5:].mean(0)
        split_criterion_values = split_criterion_values.reshape(-1, 1)

        # fit a two-component GMM; the component with the smaller mean
        # corresponds to confidently-predicted (reliable) samples
        gmm = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)
        gmm.fit(split_criterion_values)
        prob = gmm.predict_proba(split_criterion_values)
        prob = prob[:, gmm.means_.argmin()]

        threshold = self.threshold
        pred = (prob > threshold)
        self.reliable_idx = pred.nonzero()[0].tolist()
        # keep the reliable set above ~10% of the data
        kth_min, kth_max = int(len(embeddings) * 0.1), int(len(embeddings) * 0.9)
        if len(self.reliable_idx) < kth_min:
            # too few reliable samples: lower the threshold so roughly the
            # kth_min most confident samples pass
            threshold = np.partition(prob, kth=num_samples - kth_min)[num_samples - kth_min]
            pred = (prob > threshold)
            self.reliable_idx = pred.nonzero()[0].tolist()

        self.unreliable_idx = list(set(self.all_idx) - set(self.reliable_idx))

        # reliable samples keep their model predictions as hard pseudo labels
        self.pseudo_hard_label[self.reliable_idx] = self.idx_to_hard(preds_arr[self.reliable_idx])
        self.correctness_arr = copy.deepcopy(correctness_arr)
        self.prob = copy.deepcopy(prob)
        # decay the threshold, but never below the minimum posterior plus a margin
        self.threshold = max(self.threshold - self.delta, prob.min() + 1e-2)

        if printlog:
            print("Current threshold: {}. ".format(threshold))
            train_acc = torch.sum(correctness_arr) / len(correctness_arr)
            reliable_acc = torch.sum(correctness_arr[self.reliable_idx]) / len(correctness_arr[self.reliable_idx])
            unreliable_acc = torch.sum(correctness_arr[self.unreliable_idx]) / len(correctness_arr[self.unreliable_idx])
            print("Train acc: {}.".format(train_acc))
            print("Train reliable: {} samples. Accuracy: {}. ".format(len(self.reliable_idx), reliable_acc))
            print("Train unreliable: {} samples. Accuracy: {}. ".format(len(self.unreliable_idx), unreliable_acc))
            print("split_train Use {} seconds. ".format(time.time()-t))

    def select_unreliable_subset(self, printlog=True):
        """Select the unreliable samples nearest (1-NN) to reliable samples.

        Populates ``self.unreliable_sub_idx`` (one unreliable neighbour per
        reliable sample, with repeats) and the symmetric ``self.reliable_sub_idx``.

        Args:
            printlog: print subset sizes and label accuracy.
        """
        print("select_unreliable_subset.")
        t = time.time()
        embeddings = copy.deepcopy(self.embeddings)
        reliable_embeddings, unreliable_embeddings = embeddings[self.reliable_idx, :], embeddings[self.unreliable_idx, :]

        # for every reliable sample, find its nearest unreliable neighbour
        t1 = time.time()
        neigh = NearestNeighbors(n_neighbors=1)
        neigh.fit(unreliable_embeddings)
        distance, neighbor_idx = neigh.kneighbors(reliable_embeddings)
        distance, neighbor_idx = distance.flatten(), neighbor_idx.flatten()
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))

        # map neighbour positions back to dataset indices (may contain duplicates)
        self.unreliable_sub_idx = [ self.unreliable_idx[i] for i in neighbor_idx ]

        self.unreliable_sub_idx_unique = list(set(self.unreliable_sub_idx))
        unreliable_sub_embeddings = embeddings[self.unreliable_sub_idx_unique, :]

        # symmetric pass: nearest reliable neighbour of each selected unreliable sample
        t1 = time.time()
        neigh_2 = NearestNeighbors(n_neighbors=1)
        neigh_2.fit(reliable_embeddings)
        distance_2, neighbor_idx_2 = neigh_2.kneighbors(unreliable_sub_embeddings)
        distance_2, neighbor_idx_2 = distance_2.flatten(), neighbor_idx_2.flatten()
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))

        self.reliable_sub_idx = [ self.reliable_idx[i] for i in neighbor_idx_2 ]

        if printlog:
            unreliable_sub_acc = torch.sum(self.correctness_arr[self.unreliable_sub_idx_unique]) / len(self.correctness_arr[self.unreliable_sub_idx_unique])
            print("Train unreliable subset: {} samples. Accuracy: {}. ".format(len(self.unreliable_sub_idx_unique), unreliable_sub_acc))
            self.reliable_sub_idx_unique = list(set(self.reliable_sub_idx))
            reliable_sub_acc = torch.sum(self.correctness_arr[self.reliable_sub_idx_unique]) / len(self.correctness_arr[self.reliable_sub_idx_unique])
            print("Train reliable subset: {} samples. Accuracy: {}. ".format(len(self.reliable_sub_idx_unique), reliable_sub_acc))
            print("select_unreliable_subset Use {} seconds. ".format(time.time()-t))

    def knn_pseudo_label_generator(self, printlog=True):
        """Relabel the unreliable subset with a 3-NN classifier fit on reliable embeddings."""
        from sklearn.neighbors import KNeighborsClassifier
        pseudo_label_generator = KNeighborsClassifier(n_neighbors=3)
        pseudo_label_generator.fit(self.embeddings[self.reliable_idx], self.hard_to_idx(self.pseudo_hard_label)[self.reliable_idx].tolist())
        pseudo_y_unreliable_subset = pseudo_label_generator.predict(self.embeddings[self.unreliable_sub_idx])
        self.pseudo_hard_label[self.unreliable_sub_idx] = self.idx_to_hard(torch.LongTensor(pseudo_y_unreliable_subset))
        if printlog:
            true_y_unreliable_subset = self.train_loader.dataset.train_labels_gt[self.unreliable_sub_idx]
            pseudo_acc_unreliable_subset = (pseudo_y_unreliable_subset == true_y_unreliable_subset).sum() / len(true_y_unreliable_subset)
            print("After KNN, accuracy of unreliable subset: {}. ".format(pseudo_acc_unreliable_subset))

    def finetune_pseudo_label_generator(self, ema_model=None, confidence=None, optimizer=None, outside_epoch=None, printlog=True):
        """Refine pseudo labels of the unreliable subset by fine-tuning a model copy.

        Trains ``self.pl_model`` (a deep copy of ``base_model``) for
        ``finetune_eps`` epochs on reliable/unreliable sample pairs with a
        confidence-weighted cross-entropy plus a mean-teacher mixup consistency
        loss, then EMA-updates the soft pseudo labels of the unreliable subset
        from the fine-tuned model's predictions.

        Args:
            ema_model: mean-teacher model producing consistency targets (deep-copied).
            confidence: per-sample soft-label weights for the supervised loss.
            optimizer: outer-loop optimizer whose state and (rescaled) LR seed the
                local SGD optimizer.
            outside_epoch: outer-loop epoch index, drives the consistency ramp-up.
            printlog: print pseudo-label accuracy after every fine-tune epoch.
        """
        print("finetune_pseudo_label_generator.")
        self.pl_model = copy.deepcopy(self.base_model)
        self.pl_model.train()

        ema_model = copy.deepcopy(ema_model)

        optim = torch.optim.SGD(self.pl_model.parameters(), lr=self.finetune_lr, weight_decay=self.finetune_wd, momentum=0.9)
        if optimizer is not None:
            # inherit the outer optimizer state, with the LR rescaled for fine-tuning
            optim.load_state_dict(optimizer.state_dict())
            for param_group_1, param_group_2 in zip(optim.param_groups, optimizer.param_groups):
                param_group_1["lr"] = param_group_2["lr"] / self.finetune_eps * self.finetune_scale

        self.confidence = copy.deepcopy(confidence)

        self.base_model.eval()

        loader = realworld_dataloader_finetune(self.train_loader, self.reliable_idx, self.unreliable_idx, self.unreliable_sub_idx)
        labeled_loader = loader.run('labeled')

        for epoch in range(0, self.finetune_eps):
            t1 = time.time()
            for batch_idx, (img_reli_list, img_unreli, targets, trues, index) in enumerate(labeled_loader):

                img_reli_list = list(map(lambda x: x.to(self.device), img_reli_list))
                img_unreli, targets = list(map(lambda x: x.to(self.device), (img_unreli, targets)))

                batch_reli_idx = torch.LongTensor(self.reliable_idx)[index]
                batch_confidence = torch.Tensor(self.confidence[batch_reli_idx]).float().to(self.device)
                if len(batch_reli_idx) == 1:
                    # keep a 2-D shape for singleton batches
                    batch_confidence = torch.Tensor(self.confidence[batch_reli_idx][None]).float().to(self.device)

                # student predictions on every reliable view and on the unreliable sample
                logits_reli_list = list(map(lambda x: self.pl_model(x), img_reli_list))
                logits_unreli = self.pl_model(img_unreli)

                # -----------------------mean-teacher targets (no grad)------------------------------------
                with torch.no_grad():
                    ema_outputs_unreli = ema_model(img_unreli)
                    ema_outputs_reli_list = list(map(lambda x: ema_model(x), img_reli_list))
                # -----------------------------------------------------------------------------------------

                # mix up each reliable view with its unreliable partner for the consistency loss
                partial_nn_mixup_two_targets = functools.partial(nn_mixup_two_targets, x2=img_unreli, y2=ema_outputs_unreli, alpha=1.0, device=self.device, is_bias=False)
                results_nn_mixup_two_targets = list(map(lambda x, y: partial_nn_mixup_two_targets(x1=x, y1=y), img_reli_list, ema_outputs_reli_list))
                mixed_outputs_u_list = list(map(lambda x: self.pl_model(x[0]), results_nn_mixup_two_targets))
                mix_loss_list = list(map(lambda x, y: mixup_ce_loss_with_softmax(x, y[1], y[2], y[3]), mixed_outputs_u_list, results_nn_mixup_two_targets))
                mix_loss = sum(mix_loss_list) / len(mix_loss_list)

                # consistency loss, ramped up over the outer epochs
                Lc = mix_loss * exp_rampup(self.pl_start_epoch)(outside_epoch) * 1.0

                # confidence-weighted supervised loss over all reliable views
                super_loss_list = list(map(lambda x: wce(x, batch_confidence), logits_reli_list))
                super_loss = sum(super_loss_list) / len(super_loss_list)

                # final fine-tuning loss
                loss = self.loss_mix_param * Lc + super_loss

                # compute gradient and do SGD step
                optim.zero_grad()
                loss.backward()
                optim.step()

                # EMA-update the soft pseudo labels of the unreliable subset
                batch_unreli_idx = torch.LongTensor(self.unreliable_sub_idx)[index]
                batch_pseudo_soft_label = F.softmax(logits_unreli, dim=1).clone().detach().cpu()
                self.pseudo_soft_label[batch_unreli_idx] = 0.3 * self.pseudo_soft_label[batch_unreli_idx] + 0.7 * batch_pseudo_soft_label

            print("Finetune Epoch {} Use {} seconds. ".format(epoch, time.time()-t1))

            # refresh hard labels from the updated soft labels
            self.pseudo_hard_label = self.idx_to_hard(self.hard_to_idx(self.pseudo_soft_label))

            if printlog:
                pseudo_y_unreliable_subset_unique = self.hard_to_idx(self.pseudo_hard_label[self.unreliable_sub_idx_unique]).numpy()
                true_y_unreliable_subset = self.train_loader.dataset.train_labels_gt[self.unreliable_sub_idx_unique]
                pseudo_acc_unreliable_subset = (pseudo_y_unreliable_subset_unique == true_y_unreliable_subset).sum() / len(true_y_unreliable_subset)
                print("After fine-tuning epoch {}, accuracy of unreliable subset: {}. ".format(epoch, pseudo_acc_unreliable_subset))
                pseudo_soft_label_idx = self.hard_to_idx(self.pseudo_soft_label).numpy()
                pseudo_soft_label_acc = (pseudo_soft_label_idx == self.train_loader.dataset.train_labels_gt).sum() / len(self.train_loader.dataset.train_labels_gt)
                print("After updating soft labels, accuracy of soft labels: {}. ".format(pseudo_soft_label_acc))

    def set_basic_config(self, base_model, train_loader):
        """Store private deep copies of the model and loader; initialize index list."""
        self.base_model = copy.deepcopy(base_model)
        self.train_loader = copy.deepcopy(train_loader)
        # all dataset positions, used to derive the unreliable set by difference
        self.all_idx = list(range(len(self.train_loader.dataset)))

    def initialize_pseudolabeling(self):
        """Reset the pseudo-labeling model and zero the pseudo-label tables."""
        # pseudo labeler
        self.pl_model = copy.deepcopy(self.base_model)
        # pseudo labels: (num_samples, num_class) one-hot / probability tables
        self.pseudo_hard_label = torch.zeros((len(self.train_loader.dataset), self.num_class))
        self.pseudo_soft_label = torch.zeros((len(self.train_loader.dataset), self.num_class))

    def update_soft_label(self, momentum=0.1, printlog=True):
        """Blend current hard labels into the soft labels with the given momentum."""
        self.pseudo_soft_label = momentum * self.pseudo_hard_label + (1 - momentum) * self.pseudo_soft_label
        if printlog:
            pseudo_soft_label_idx = self.hard_to_idx(self.pseudo_soft_label)
            pseudo_soft_label_acc = (pseudo_soft_label_idx == self.train_loader.dataset.train_labels_gt).sum() / len(self.train_loader.dataset.train_labels_gt)
            print("After updating soft labels, accuracy of soft labels: {}. ".format(pseudo_soft_label_acc))

    def soft_to_hard(self, x):
        """Convert soft-label rows to one-hot rows via argmax."""
        with torch.no_grad():
            # use the configured device instead of a hard-coded .cuda() call
            # so CPU-only runs work as well
            return (torch.zeros(len(x), self.num_class, device=self.device)).scatter_(1, (x.argmax(dim=1)).view(-1, 1), 1)

    def idx_to_hard(self, x):
        """Convert a LongTensor of class indices to one-hot rows."""
        with torch.no_grad():
            return torch.zeros(len(x), self.num_class, device=x.device).scatter_(1, x.view(-1, 1), 1)

    def hard_to_idx(self, x):
        """Convert one-hot (or soft) label rows to class indices via argmax."""
        return torch.argmax(x, dim=1, keepdim=False)
        

class realworld_dataset_for_finetune(Dataset):
    """Dataset view used during pseudo-label fine-tuning.

    In 'labeled' mode each item pairs a reliable sample (three augmented views)
    with the unreliable-subset sample at the same position; in 'unlabeled' mode
    items are the remaining unreliable samples, singly transformed.
    """

    def __init__(self, train_dataset, reliable_idx, unreliable_idx, unreliable_sub_idx, mode):
        self.train_dataset = train_dataset
        self.train_data = train_dataset.train_data
        self.noise_label = train_dataset.train_labels
        self.true_label = train_dataset.train_labels_gt

        # shared CIFAR-style normalization; one instance reused by all pipelines
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        self.ori_transform = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        self.weak_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        self.strong_transform = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])

        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx
        # unreliable samples not selected into the subset stay unlabeled
        self.unlabeled_idx = list(set(unreliable_idx) - set(unreliable_sub_idx))

        self.mode = mode

        if mode == 'labeled':
            self.train_data_reliable = self.train_data[self.reliable_idx]
            self.train_data_unreliable = self.train_data[self.unreliable_sub_idx]
            self.noise_label_reliable = self.noise_label[self.reliable_idx]
            self.true_label_reliable = self.true_label[self.reliable_idx]
        if mode == 'unlabeled':
            self.train_data_unlabeled = self.train_data[self.unlabeled_idx]
            self.true_label_unlabeled = self.true_label[self.unlabeled_idx]

    def __getitem__(self, index):
        if self.mode == 'labeled':
            target = self.noise_label_reliable[index]
            true_label = self.true_label_reliable[index]
            reli_img = Image.fromarray(self.train_data_reliable[index])
            # three views of the reliable image: plain, weakly and strongly augmented
            reli_views = (
                self.ori_transform(reli_img),
                self.weak_transform(reli_img),
                self.strong_transform(reli_img),
            )
            unreli_img = self.ori_transform(Image.fromarray(self.train_data_unreliable[index]))
            return reli_views, unreli_img, target, true_label, index

        if self.mode == 'unlabeled':
            return self.ori_transform(Image.fromarray(self.train_data_unlabeled[index]))

    def __len__(self):
        if self.mode == 'labeled':
            return len(self.train_data_reliable)
        if self.mode == 'unlabeled':
            return len(self.train_data_unlabeled)

# finetune dataloader factory
class realworld_dataloader_finetune:
    """Builds shuffled DataLoaders over realworld_dataset_for_finetune views."""

    def __init__(self, train_loader, reliable_idx, unreliable_idx, unreliable_sub_idx):
        # reuse the original loader's dataset and loading configuration
        self.dataset = train_loader.dataset
        self.batch_size = train_loader.batch_size
        self.num_workers = train_loader.num_workers
        self.reliable_idx = reliable_idx
        self.unreliable_idx = unreliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def run(self, mode):
        """Return a shuffled DataLoader for 'labeled' or 'unlabeled' mode; None otherwise."""
        if mode not in ('labeled', 'unlabeled'):
            return None
        view = realworld_dataset_for_finetune(
            self.dataset,
            self.reliable_idx,
            self.unreliable_idx,
            self.unreliable_sub_idx,
            mode=mode,
        )
        return DataLoader(
            dataset=view,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )
        
# finetune loss
def linear_rampup(current, warm_up, lambda_u=25, rampup_length=16):
    """Linearly ramp the unsupervised weight from 0 to lambda_u over
    rampup_length epochs, starting after warm_up epochs."""
    progress = float(np.clip((current - warm_up) / rampup_length, 0.0, 1.0))
    return lambda_u * progress

class SemiLoss(object):
    """MixMatch-style semi-supervised loss: soft cross-entropy on labeled
    outputs, MSE on unlabeled ones, plus a linearly ramped mixing weight."""

    def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, warm_up):
        # supervised term: soft cross-entropy against targets_x
        log_probs_x = F.log_softmax(outputs_x, dim=1)
        Lx = -(targets_x * log_probs_x).sum(dim=1).mean()

        # unsupervised term: mean squared error between predicted and target probs
        probs_u = torch.softmax(outputs_u, dim=1)
        Lu = ((probs_u - targets_u) ** 2).mean()

        return Lx, Lu, linear_rampup(epoch, warm_up)
    
class ConsistencyLoss(object):
    """KL-divergence consistency loss KL(softmax(output1) || softmax(output2)),
    with output1 treated as a fixed (detached) target."""

    def __call__(self, output1, output2):
        target = F.softmax(output1, dim=1).detach()
        log_pred = F.log_softmax(output2, dim=1)
        per_sample = F.kl_div(log_pred, target, reduction='none').sum(dim=1)
        return per_sample.mean()

class ConsistencyLoss2(object):
    """Squared-difference consistency between two softmax distributions,
    with the first treated as a fixed (detached) target."""

    def __call__(self, output1, output2):
        target = F.softmax(output1, dim=1).detach()
        pred = F.softmax(output2, dim=1)
        return ((target - pred) ** 2).sum(dim=1).mean()
    
def wce(outputs, weights):
    """Soft-target cross-entropy: batch mean of -sum(weights * log_softmax(outputs))."""
    log_probs = F.log_softmax(outputs, dim=1)
    return -(weights * log_probs).sum(dim=1).mean()

def mixup_two_targets(x, y, alpha=1.0, device='cuda', is_bias=False):
    """Return batch-permuted mixup inputs, the paired targets, and lambda.

    lambda is drawn from Beta(alpha, alpha) when alpha > 0, else fixed at 1;
    with is_bias the larger of lambda / 1-lambda is used.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    if is_bias:
        lam = max(lam, 1 - lam)

    # random pairing within the batch
    perm = torch.randperm(x.size(0)).to(device)

    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam

def nn_mixup_two_targets(x1, x2, y1, y2, alpha=1.0, device='cuda', is_bias=False):
    """Return nearest-neighbour mixup of two paired batches plus both targets and lambda.

    Unlike mixup_two_targets, the partners are given explicitly (x2, y2) rather
    than drawn by permuting the batch.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    if is_bias:
        lam = max(lam, 1 - lam)

    mixed_x = lam * x1 + (1 - lam) * x2
    return mixed_x, y1, y2, lam

def mixup_ce_loss_with_softmax(preds, targets_a, targets_b, lam):
    """Mixed soft cross-entropy where both target logit sets are softmaxed first."""
    log_probs = F.log_softmax(preds, dim=1)
    # per-target soft cross-entropy against the shared predictions
    loss_a = -(F.softmax(targets_a, 1) * log_probs).sum(dim=1).mean()
    loss_b = -(F.softmax(targets_b, 1) * log_probs).sum(dim=1).mean()

    return lam * loss_a + (1 - lam) * loss_b

def mixup_ce_loss(preds, targets_a, targets_b, lam):
    """Mixed soft cross-entropy with targets taken as probabilities directly."""
    log_probs = F.log_softmax(preds, dim=1)
    # per-target soft cross-entropy against the shared predictions
    loss_a = -(targets_a * log_probs).sum(dim=1).mean()
    loss_b = -(targets_b * log_probs).sum(dim=1).mean()

    return lam * loss_a + (1 - lam) * loss_b

def exp_rampup(rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242"""
    def schedule(epoch):
        # fully ramped once the rampup window has passed
        if epoch >= rampup_length:
            return 1.0
        clipped = np.clip(epoch, 0.0, rampup_length)
        phase = 1.0 - clipped / rampup_length
        return float(np.exp(-5.0 * phase * phase))
    return schedule

def update_ema(model, ema_model, alpha, global_step):
    """In-place EMA update of ema_model's parameters toward model's.

    The decay is capped at 1 - 1/(step+1) so early steps follow the model
    closely before settling at the requested alpha.
    """
    decay = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(decay).add_(param.data, alpha=1 - decay)
 
    