import copy
import torch
import torch.nn.functional as F
from cifar_models import resnet
from utils.utils_loss import wce_loss
import time
import random
from sklearn.mixture import GaussianMixture
import numpy as np
from torch.utils.data import Dataset, DataLoader
from functools import partial

class Trainer:
    """Trainer that iteratively refines partial/noisy supervision.

    Three pseudo-label matrices over the training set are maintained:

    * ``Y_refine_mat`` -- the blended supervision actually used as targets;
    * ``Y_refine_F``   -- soft labels predicted by the main model ``model``;
    * ``Y_refine_G``   -- soft labels from ``model_g``, a clone of ``model``
      fine-tuned on mixup pairs of reliable and unreliable samples.

    After every epoch, ``Y_refine_F`` (and, every ``enhance_interval`` epochs,
    ``Y_refine_G``) is folded back into ``Y_refine_mat`` using the scalar
    interpolation weight ``_lambda`` and the per-sample weights ``mask_beta``.
    """

    def __init__(self, args, id) -> None:
        # `args`: experiment configuration namespace.
        # `id`: index of this trainer; selects the GPU and the entries of the
        # per-trainer hyper-parameter lists (lambda_list / beta_list).
        self.args = args
        self.id = id

    def assign_device(self):
        """Bind this trainer to a CUDA device chosen from args.gpu_ids."""
        if len(self.args.gpu_ids) == 1:
            self.device = torch.device("cuda:" + str(self.args.gpu_ids[0]))
        else:
            self.device = torch.device("cuda:" + str(self.args.gpu_ids[self.id]))

    # config dataset and loader
    def create_dataloader(self, train_loader, eval_train_loader):
        """Keep private deep copies of the train / eval-on-train loaders."""
        self.train_loader = copy.deepcopy(train_loader)
        self.eval_train_loader = copy.deepcopy(eval_train_loader)
        self.n_samples = len(self.train_loader.dataset)

    def create_model(self, num_classes=None):
        """Create the backbone, its fine-tuning clone and (optionally) an EMA."""
        self.num_classes = num_classes
        if self.args.mo == 'resnet':
            self.model = resnet(depth=32, num_classes=self.num_classes)
            self.model = add_feature_extractor_to_model(self.model, self.args.mo)
            self.n_features = 64  # embedding width of the depth-32 CIFAR ResNet
        else:
            # BUGFIX: an unknown architecture previously fell through and only
            # crashed later with an opaque AttributeError on self.model.
            raise ValueError("Unsupported model architecture: " + str(self.args.mo))

        self.model.to(self.device)
        self.model_g = copy.deepcopy(self.model)
        if self.args.use_ema:
            self.ema = EMA(self.model, ema_step=self.args.ema_step, alpha=self.args.ema_alpha, mode=self.args.ema_mode)

    def create_optimizer(self):
        """SGD optimizers for the main model and for model_g."""
        if self.args.optimizer == "sgd":
            self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr, weight_decay=self.args.wd, momentum=0.9)
            self.optimizer_g = torch.optim.SGD(self.model_g.parameters(), lr=self.args.lr, weight_decay=self.args.wd, momentum=0.9)

    def create_loss_fn(self):
        """Weighted cross-entropy for both refined and original supervision."""
        self.loss_refine_fn = wce_loss
        self.loss_origin_fn = wce_loss

    def config_pseudo_label(self):
        """Initialise the pseudo-label matrices and refinement hyper-parameters."""
        self.Y_refine_mat = row_normalize_mat(torch.Tensor(self.train_loader.dataset.given_label_matrix).to(self.device))
        self.Y_refine_F = copy.deepcopy(self.Y_refine_mat)
        self.Y_refine_G = copy.deepcopy(self.Y_refine_mat)

        if len(self.args.lambda_list) == 1:
            self._lambda = self.args.lambda_list[0]
        else:
            # BUGFIX: was max(self.id, len-1), which always picked the last
            # list entry (or raised IndexError when id exceeded the list).
            # Clamp the trainer id to the last valid index instead.
            self._lambda = self.args.lambda_list[min(self.id, len(self.args.lambda_list) - 1)]
        if len(self.args.beta_list) == 1:
            self._beta = self.args.beta_list[0]
        else:
            # BUGFIX: same clamping fix as for lambda_list above.
            self._beta = self.args.beta_list[min(self.id, len(self.args.beta_list) - 1)]

        self.mask_beta = torch.ones_like(self.Y_refine_mat) * self._beta
        # Per-sample caches refreshed by epoch_update / epoch_update_Y_refine_G.
        self.embeddings = torch.zeros((self.n_samples, self.n_features)).to(self.device)
        self.outputs = torch.zeros((self.n_samples, self.num_classes)).to(self.device)
        self.outputs_g = torch.zeros((self.n_samples, self.num_classes)).to(self.device)
        self.mixup_g = Mixup(alpha=1, is_bias=True)

        if self.args.use_rollwindow:
            self.rollwindow_mat = RollWindow(self.args.window_size, self.Y_refine_mat)

        if self.args.T != 1.0:
            # NOTE(review): this monkey-patches F.softmax GLOBALLY so every
            # later softmax call applies temperature scaling by args.T.
            # temp_fun captures the original softmax, so there is no recursion,
            # but any other module using F.softmax is affected too.
            temp_fun = partial(F.softmax)
            F.softmax_T = lambda x, dim: temp_fun(x / self.args.T, dim)
            F.softmax = partial(F.softmax_T)

    def train_epoch(self, epoch):
        """Run one training epoch, then refresh the pseudo labels."""
        self.current_epoch = epoch

        self.model.train()
        if self.args.use_rollwindow:
            Y_refine_mat = self.rollwindow_mat.reduce()
        else:
            Y_refine_mat = self.Y_refine_mat

        for i, (images_aug_list, labels, _, indexes) in enumerate(self.train_loader):
            X_list = list(map(lambda x: x.to(self.device), images_aug_list))
            Y = labels.to(self.device)
            Y_refine = Y_refine_mat[indexes, :].to(self.device)
            outputs_list = list(map(lambda x: self.model(x), X_list))
            # Loss on the refined pseudo labels, averaged over augmentations.
            loss_refine = sum(list(map(lambda x: self.loss_refine_fn(x, Y_refine), outputs_list))) / len(outputs_list)
            loss = loss_refine
            # Optional additional loss on the originally given labels.
            if self.args.loss_use_origin:
                loss_origin = sum(list(map(lambda x: self.loss_origin_fn(x, Y), outputs_list))) / len(outputs_list)
                loss += self.args.loss_origin_weight * loss_origin
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            if self.args.use_ema:
                self.ema.update_ema_variables(self.model)

            if self.args.use_step_refine_F:
                self.step_update_Y_refine_F(indexes, outputs_list, Y)

        self.model.eval()
        self.epoch_update()
        if self.args.use_epoch_refine_F:
            self.epoch_update_Y_refine_F()

        # Every enhance_interval epochs also refresh G and the beta mask.
        if epoch % self.args.enhance_interval == 0:
            self.epoch_update_mask_beta()
            self.epoch_update_Y_refine_G()
            self.epoch_refine_supervision()
        else:
            self.epoch_refine_supervision_without_G()

        if self.args.use_rollwindow:
            self.rollwindow_mat.put(self.Y_refine_mat)

    def epoch_refine_supervision(self, verbose=True):
        """Blend F and G estimates into Y_refine_mat; optionally log accuracies."""
        if verbose:
            temp1 = copy.deepcopy(self.Y_refine_mat)
            temp2 = copy.deepcopy((1 - self._lambda) * self.Y_refine_mat + self._lambda * self.Y_refine_F)
            temp3 = copy.deepcopy(self.Y_refine_F)
            temp4 = (1 - self.mask_beta) * self.Y_refine_F + self.mask_beta * self.Y_refine_G
            temp5 = copy.deepcopy(self.train_loader.dataset.true_labels)
            results = list(map(lambda x: x.cpu().argmax(dim=1), (temp1, temp2, temp3, temp4)))
            accs = list(map(lambda x: (x == temp5).sum() / len(x) * 100, results))
            print(accs)
            accs = list(map(lambda x: (x[self.unreliable_idx] == temp5[self.unreliable_idx]).sum() / len(self.unreliable_idx) * 100, results))
            print(accs)
        self.Y_refine_mat = (1 - self._lambda) * self.Y_refine_mat + self._lambda * ((1 - self.mask_beta) * self.Y_refine_F + self.mask_beta * self.Y_refine_G)

    def epoch_refine_supervision_without_G(self):
        """Blend only the F estimates into Y_refine_mat."""
        self.Y_refine_mat = (1 - self._lambda) * self.Y_refine_mat + self._lambda * self.Y_refine_F

    def step_refine_supervision(self, indexes):
        """Per-batch variant of epoch_refine_supervision for the given rows."""
        self.Y_refine_mat[indexes, :] = (1 - self._lambda) * self.Y_refine_mat[indexes, :] + self._lambda * ((1 - self.mask_beta[indexes, :]) * self.Y_refine_F[indexes, :] + self.mask_beta[indexes, :] * self.Y_refine_G[indexes, :])

    def epoch_update(self):
        """Refresh cached embeddings and logits over the whole training set."""
        with torch.no_grad():
            for image_ori, labels, _, indexes in self.eval_train_loader:
                X, Y = image_ori.to(self.device), labels.to(self.device)
                self.embeddings[indexes, :], self.outputs[indexes, :] = list(map(lambda x: x.clone().detach(), self.model.forward_with_last_layer(X)))
            self.embeddings = F.normalize(self.embeddings, dim=1)

    def epoch_update_Y_refine_F(self, momentum=1.0):
        """Recompute Y_refine_F from the cached logits (epoch granularity)."""
        print("Updating F.")
        delta = F.softmax(self.outputs, dim=1)
        if self.args.annotation_type.startswith("partial"):
            # Restrict the soft labels to each sample's candidate label set.
            Y = torch.Tensor(self.train_loader.dataset.given_label_matrix).to(self.device)
            delta = row_normalize_mat(delta * Y)
        self.Y_refine_F = (1 - momentum) * self.Y_refine_F + momentum * delta.clone().detach()

    def step_update_Y_refine_F(self, indexes, outputs_list, Y, momentum=1.0, use_aug_aggregation=False):
        """Update Y_refine_F for one batch, optionally averaging augmentations."""
        if use_aug_aggregation:
            delta = sum(list(map(lambda x: F.softmax(x, dim=1), outputs_list))) / len(outputs_list)
        else:
            delta = F.softmax(outputs_list[0], dim=1)

        if self.args.annotation_type.startswith("partial"):
            delta = row_normalize_mat(delta * Y)
        self.Y_refine_F[indexes, :] = (1 - momentum) * self.Y_refine_F[indexes, :] + momentum * delta.clone().detach()

    def epoch_update_Y_refine_G(self, momentum=1.0):
        """Fine-tune model_g on reliable/unreliable mixup pairs, then refresh G."""
        print("Updating G.")
        self.model_g.train()
        self.model_g.load_state_dict(self.model.state_dict())
        self.optimizer_g.load_state_dict(self.optimizer.state_dict())
        for _ in range(0, self.args.finetune_eps):
            for imgs_reli, imgs_unreli, lbl_reli, lbl_unreli, idx_reli, idx_unreli in self.local_loader:
                imgs_reli, imgs_unreli = list(map(lambda x: [ item.to(self.device) for item in x], (imgs_reli, imgs_unreli)))
                lbl_reli, lbl_unreli = list(map(lambda x: x.to(self.device), (lbl_reli, lbl_unreli)))
                # Teacher predictions (EMA model when available) are detached;
                # only model_g receives gradients below.
                if self.args.use_ema:
                    outputs_unreli_list = list(map(lambda x: self.ema(x).clone().detach(), imgs_unreli))
                    outputs_reli_list   = list(map(lambda x: self.ema(x).clone().detach(), imgs_reli))
                else:
                    with torch.no_grad():
                        outputs_unreli_list = list(map(lambda x: self.model(x).clone().detach(), imgs_unreli))
                        outputs_reli_list = list(map(lambda x: self.model(x).clone().detach(), imgs_reli))

                align_Y_unreli_list = list(map(lambda x: F.softmax(x, dim=1), outputs_unreli_list))
                align_Y_reli_list   = list(map(lambda x: F.softmax(x, dim=1), outputs_reli_list))
                if self.args.annotation_type.startswith("partial"):
                    align_Y_unreli_list = list(map(lambda x: row_normalize_mat(x * lbl_unreli), align_Y_unreli_list))
                    align_Y_reli_list = list(map(lambda x: row_normalize_mat(x * lbl_reli), align_Y_reli_list))

                imgs_mixed = list(map(lambda x1, x2, y1, y2: self.mixup_g.mixup_two_targets(x1, x2, y1, y2), imgs_reli, imgs_unreli, align_Y_reli_list, align_Y_unreli_list))
                outputs_mixed = list(map(lambda x: self.model_g(x), imgs_mixed))
                loss_mixed = sum(list(map(lambda x: self.mixup_g.mixup_ce_loss_with_softmax(x), outputs_mixed))) / len(outputs_mixed)
                loss_align = sum(list(map(lambda x, y: wce_loss(self.model_g(x), y), imgs_reli, align_Y_reli_list))) / len(imgs_reli)
                loss_g = self.args.loss_weight_kappa * loss_mixed + (1 - self.args.loss_weight_kappa) * loss_align
                self.optimizer_g.zero_grad()
                loss_g.backward()
                self.optimizer_g.step()
            print("fine-tuning.")
        self.model_g.eval()
        with torch.no_grad():
            for image_ori, labels, _, indexes in self.eval_train_loader:
                X, Y = image_ori.to(self.device), labels.to(self.device)
                self.outputs_g[indexes, :] = self.model_g(X).clone().detach()

        delta = F.softmax(self.outputs_g, dim=1)
        if self.args.annotation_type.startswith("partial"):
            Y = torch.Tensor(self.eval_train_loader.dataset.given_label_matrix).to(self.device)
            delta = row_normalize_mat(delta * Y)

        self.Y_refine_G = (1 - momentum) * self.Y_refine_G + momentum * delta.clone().detach()

    def epoch_update_mask_beta(self, threshold=0.5, min_ratio=0.1, max_ratio=0.8, random_ratio=1, random_mode=False):
        """Split samples into reliable/unreliable and rebuild mask_beta.

        Fits a 2-component GMM on the (normalised) prediction entropy,
        clips the reliable set size into [min_ratio, max_ratio] of the data,
        pairs each reliable sample with its nearest unreliable neighbour in
        embedding space, and rebuilds the local mixup loader.
        """
        print("Updating mask beta.")
        all_idx = [ i for i in range(0, self.n_samples)]

        if random_mode:
            k = int(len(all_idx) * random.uniform(a=min_ratio, b=max_ratio))
            reliable_idx = random.sample(all_idx, k=k)
            unreliable_sub_idx = random.sample(all_idx, k=k)
            # BUGFIX: unreliable_idx was never defined on this branch, which
            # raised NameError when it was used below.
            unreliable_idx = list(set(all_idx) - set(reliable_idx))
        else:
            # Normalised prediction entropy as the split criterion.
            split_criterion_values = - (F.softmax(self.outputs, dim=1) * torch.log_softmax(self.outputs, dim=1)).sum(dim=1)
            split_criterion_values = (split_criterion_values - split_criterion_values.min())/(split_criterion_values.max() - split_criterion_values.min())
            split_criterion_values = split_criterion_values.reshape(-1, 1).cpu().numpy()
            # fit a two-component GMM to the loss
            gmm = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)
            gmm.fit(split_criterion_values)
            prob = gmm.predict_proba(split_criterion_values)
            # Probability of the low-entropy (reliable) component.
            prob = prob[:, gmm.means_.argmin()]

            pred = (prob > threshold)
            reliable_idx = pred.nonzero()[0].tolist()
            # Clip the reliable set size into [min_ratio, max_ratio] of the data
            # by moving the threshold to the corresponding order statistic.
            kth_min, kth_max = int(self.n_samples * min_ratio), int(self.n_samples * max_ratio)
            if len(reliable_idx) < kth_min:
                threshold = np.partition(prob, kth=self.n_samples - kth_min)[self.n_samples - kth_min]
                pred = (prob > threshold)
                reliable_idx = pred.nonzero()[0].tolist()
            if len(reliable_idx) > kth_max:
                threshold = np.partition(prob, kth=self.n_samples - kth_max)[self.n_samples - kth_max]
                pred = (prob > threshold)
                reliable_idx = pred.nonzero()[0].tolist()

            unreliable_idx = list(set(all_idx) - set(reliable_idx))
            unreliable_embeddings = self.embeddings[unreliable_idx,:]

            reliable_idx = random.sample(reliable_idx, k=int(len(reliable_idx)*random_ratio))
            reliable_embeddings = self.embeddings[reliable_idx,:]

            t1 = time.time()
            with torch.no_grad():
                # Pair every reliable sample with its nearest unreliable one.
                solver = KNN_solver(reliable_embeddings, unreliable_embeddings, self.device)
                neighbor_idx = solver.nearest_neighbor(5000, 5000)
            print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))
            unreliable_sub_idx = [ unreliable_idx[i] for i in neighbor_idx ]
            print("Unreliable subset samples: {}.".format(len(set(unreliable_sub_idx))))
            print("Reliable samples: {}.".format(len(set(reliable_idx))))
            print("Unreliable samples: {}.".format(len(set(unreliable_idx))))

        self.reliable_idx = reliable_idx
        self.unreliable_idx = unreliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx
        # Only unreliable samples receive the G estimate (weight beta);
        # reliable samples rely purely on F.
        self.mask_beta[unreliable_idx, :] = self._beta
        self.mask_beta[list(set(all_idx) - set(unreliable_idx)), :] = 0
        self.local_dataset = LocalDataset(self.train_loader.dataset)
        self.local_dataset.update_idx(self.reliable_idx, self.unreliable_sub_idx)
        self.local_loader = DataLoader(
                dataset = self.local_dataset,
                batch_size = self.args.bs,
                shuffle=True,
                num_workers=8,
                drop_last=True
            )

def row_normalize_mat(Y):
    """Return Y with each row rescaled to sum to one (L1 row normalization)."""
    row_sums = Y.sum(dim=1, keepdim=True)
    return Y / row_sums

def add_feature_extractor_to_model(model, model_name):
    """Attach a `forward_with_last_layer(x) -> (embedding, logits)` method.

    Only the 'resnet' architecture is supported; any other `model_name`
    returns the model unchanged.

    BUGFIX: the original code (a) tested a non-existent attribute name
    ('last_k_layer_forward') instead of the 'forward_with_last_layer' it
    actually assigns, and (b) performed the assignment outside the
    `model_name == "resnet"` branch, raising NameError for other models.
    """
    import types
    if model_name == "resnet" and not hasattr(model, 'forward_with_last_layer'):
        def forward_with_last_layer(self, x):
            # Standard CIFAR ResNet forward pass, also returning the
            # pre-classifier embedding `l`.
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)    # 32x32

            x = self.layer1(x)  # 32x32
            x = self.layer2(x)  # 16x16
            x = self.layer3(x)  # 8x8

            x = self.avgpool(x)
            l = x.view(x.size(0), -1)
            x = self.fc(l)
            return l, x
        model.forward_with_last_layer = types.MethodType(forward_with_last_layer, model)
    return model


class KNN_solver:
    """Chunked brute-force 1-nearest-neighbour search from rows of A to rows of B."""

    def __init__(self, A, B, device) -> None:
        # A: (m, d) query embeddings; B: (n, d) candidate embeddings.
        self.A = A
        self.B = B
        self.device = device

    def compute_distances_tensor(self, A, B, device):
        """Pairwise Euclidean distances between rows of A and rows of B."""
        A = A.to(device)
        B = B.to(device)
        m = A.shape[0]
        n = B.shape[0]
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
        M = A @ B.T
        H = torch.square(A).sum(dim=1, keepdim=True).repeat(1, n)
        K = torch.square(B).sum(dim=1, keepdim=True).T.repeat(m, 1)
        # BUGFIX: clamp tiny negative values caused by floating-point error
        # before the square root; previously they produced NaN distances.
        return torch.sqrt(torch.clamp(-2 * M + H + K, min=0))

    def nearest_neighbor(self, num_subset_A = 10000, num_subset_B = 10000):
        """Return, for every row of A, the index of its nearest row in B.

        Distances are computed block-wise (num_subset_A x num_subset_B chunks)
        so the full pairwise matrix never has to fit on the device at once.
        """
        def gen_start_end(num_set, num_subset):
            len_gen = (num_set // num_subset) if (num_set % num_subset == 0) else  (num_set // num_subset + 1)
            for i in range(len_gen):
                start_idx = i * num_subset
                end_idx = min((i + 1) * num_subset, num_set)
                yield start_idx, end_idx

        num_set_A, num_set_B = self.A.shape[0], self.B.shape[0]

        C = torch.zeros((num_set_A, num_set_B), dtype=torch.float32)
        for A_start_idx, A_end_idx in gen_start_end(num_set_A, num_subset_A):
            # BUGFIX: the B-range generator must be recreated for every A
            # chunk; the original built it once, so it was exhausted after the
            # first outer iteration and later rows of C stayed at zero,
            # corrupting the argmin results whenever A was chunked.
            for B_start_idx, B_end_idx in gen_start_end(num_set_B, num_subset_B):
                A_sub = self.A[A_start_idx:A_end_idx]
                B_sub = self.B[B_start_idx:B_end_idx]
                C_sub = self.compute_distances_tensor(A_sub, B_sub, self.device)
                C[A_start_idx:A_end_idx, B_start_idx:B_end_idx] = C_sub.clone().detach().cpu()

        nearest_idx = C.argmin(dim=1).numpy()
        return nearest_idx

class LocalDataset(Dataset):
    """Paired dataset: each item is one reliable and one unreliable sample,
    each returned as [base view] + weak views + strong views."""

    def __init__(self, train_dataset, strong_transform_times=2, weak_transform_times=0):
        # Borrow raw data and transforms from the underlying training dataset.
        self.images = train_dataset.images
        self.given_label_matrix = train_dataset.given_label_matrix
        self.transform = train_dataset.transform
        self.weak_transform = train_dataset.weak_transform
        self.strong_transform = train_dataset.strong_transform
        self.weak_transform_times = weak_transform_times
        self.strong_transform_times = strong_transform_times
        # Placeholder indices until update_idx() installs the real split.
        self.reliable_idx = [0]
        self.unreliable_sub_idx = [0]

    def update_idx(self, reliable_idx, unreliable_sub_idx):
        """Install the reliable / matched-unreliable index lists (same length)."""
        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def __getitem__(self, index):
        idx_r = self.reliable_idx[index]
        idx_u = self.unreliable_sub_idx[index]

        def augment(img):
            # One base view, then the configured weak and strong views.
            views = [self.transform(img)]
            views.extend(self.weak_transform(img) for _ in range(self.weak_transform_times))
            views.extend(self.strong_transform(img) for _ in range(self.strong_transform_times))
            return views

        imgs_reli = augment(self.images[idx_r])
        imgs_unreli = augment(self.images[idx_u])
        return (imgs_reli, imgs_unreli,
                self.given_label_matrix[idx_r], self.given_label_matrix[idx_u],
                idx_r, idx_u)

    def __len__(self):
        return len(self.reliable_idx)
    

class EMA:
    """Exponential moving average (EMA) of a model's parameters.

    The effective decay is ``rampup(step, ema_step) * alpha``, so the shadow
    model tracks the live model closely early in training and averages more
    slowly as training progresses.
    """

    def __init__(self, model, ema_step=40000, alpha=0.997, mode="sigmoid") -> None:
        self.ema_model = copy.deepcopy(model)  # shadow copy updated in place
        self.global_step = 0
        self.ema_step = ema_step   # ramp-up length in optimisation steps
        self.alpha = alpha         # maximum decay rate
        self.mode = mode
        if self.mode == "sigmoid":
            self.mode_fun = self.sigmoid_rampup
        elif self.mode == "linear":
            self.mode_fun = self.linear_rampup
        else:
            # BUGFIX: an unknown mode previously left `mode_fun` unset and
            # only failed later inside update_ema_variables with an opaque
            # AttributeError; fail fast at construction instead.
            raise ValueError("Unsupported EMA mode: {}".format(mode))

    def update_ema_variables(self, model):
        """Fold the live model's parameters into the shadow model."""
        # Use the true average until the exponential average is more correct
        if self.alpha == 0:
            for ema_param, param in zip(self.ema_model.parameters(), model.parameters()):
                ema_param.data = param.data
        else:
            alpha = self.mode_fun(self.global_step + 1, self.ema_step) * self.alpha
            for ema_param, param in zip(self.ema_model.parameters(), model.parameters()):
                ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
        self.global_step += 1

    def __call__(self, x):
        """Gradient-free forward pass through the shadow model."""
        with torch.no_grad():
            y = self.ema_model(x)
        return y

    @staticmethod
    def sigmoid_rampup(current, rampup_length):
        """Exponential sigmoid ramp from ~0 to 1 over `rampup_length` steps."""
        if rampup_length == 0:
            return 1.0
        else:
            current = np.clip(current, 0.0, rampup_length)
            phase = 1.0 - current / rampup_length
            return float(np.exp(-5.0 * phase * phase))

    @staticmethod
    def linear_rampup(current, rampup_length):
        """Linear ramp from 0 to 1 over `rampup_length` steps."""
        if rampup_length == 0:
            return 1.0
        else:
            current = np.clip(current, 0.0, rampup_length)
            phase = current / rampup_length
            return phase
        

class Mixup:
    """Mixup augmentation plus the soft cross-entropy loss for mixed targets."""

    def __init__(self, alpha=1.0, is_bias=False) -> None:
        self.alpha = alpha      # Beta(alpha, alpha) parameter; <=0 disables mixing
        self.is_bias = is_bias  # when True, bias lam towards the first input

    def mixup_two_targets(self, x1, x2, y1, y2):
        """Mix two batches; remembers (y_a, y_b, lam) for the loss call."""
        lam = np.random.beta(self.alpha, self.alpha) if self.alpha > 0 else 1
        if self.is_bias:
            lam = max(lam, 1 - lam)

        self.y_a = y1
        self.y_b = y2
        self.lam = lam
        return lam * x1 + (1 - lam) * x2

    def mixup_ce_loss_with_softmax(self, preds):
        """Soft cross-entropy against the stored target pair, weighted by lam."""
        log_probs = F.log_softmax(preds, dim=1)
        loss_a = -(self.y_a * log_probs).sum(dim=1).mean()
        loss_b = -(self.y_b * log_probs).sum(dim=1).mean()
        return self.lam * loss_a + (1 - self.lam) * loss_b
    

class RollWindow:
    """Fixed-size ring buffer whose reduce() returns the mean of its slots."""

    def __init__(self, window_size, item) -> None:
        self.window_size = window_size
        # Pre-fill every slot with an independent copy of the seed item.
        self.window = [copy.deepcopy(item) for _ in range(window_size)]
        self.pointer = -1  # advanced before each write, so first put() hits slot 0

    def reduce(self):
        """Mean of all items currently held in the window."""
        total = sum(self.window)
        return total / len(self.window)

    def put(self, item):
        """Overwrite the oldest slot with a deep copy of `item`."""
        next_slot = (self.pointer + 1) % self.window_size
        self.window[next_slot] = copy.deepcopy(item)
        self.pointer = next_slot
    
    

