import copy
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import time

from sklearn.mixture import GaussianMixture
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA

class PLG:
    """Pseudo-Label Generation for learning with noisy labels.

    Splits the training set into reliable / unreliable samples by fitting a
    two-component GMM over a per-sample prediction-entropy criterion, picks an
    unreliable subset via nearest-neighbour matching in embedding space, and
    refreshes pseudo labels either with a KNN classifier or by fine-tuning a
    copy of the base model with consistency losses.

    Expected call order (driven externally):
        set_basic_config -> initialize_pseudolabeling -> split_train
        -> select_unreliable_subset
        -> knn_pseudo_label_generator / finetune_pseudo_label_generator
        -> update_soft_label
    """
    def __init__(self, num_class, threshold, epochs, device, args) -> None:
        # basic config; base_model / train_loader are injected later via
        # set_basic_config()
        self.base_model = None
        self.train_loader = None
        self.num_class = num_class
        self.threshold = threshold
        # per-call decay so the GMM threshold reaches ~1e-4 after `epochs`
        # calls to split_train()
        self.delta = (threshold - 1e-4) / epochs
        self.device = device
        self.args = args

        # variable about splitting:
        # history of normalized per-sample split criteria, one tensor appended
        # per split_train() call (last 5 are averaged when review=True)
        self.split_criterion_values_arr = []
        # bool tensor: model prediction == ground-truth label (logging only)
        self.correctness_arr = None

        # sample-index partitions filled by split_train / select_unreliable_subset
        self.reliable_idx = None
        self.unreliable_idx = None
        self.unreliable_sub_idx = None

        pass

    def split_train(self, review=True, printlog=True):
        """Partition the training set into reliable / unreliable indices.

        Computes per-sample prediction entropy with the (frozen) base model,
        fits a 2-component GMM on it, and thresholds the posterior of the
        low-mean (presumed clean) component.

        Side effects: sets self.reliable_idx / self.unreliable_idx,
        self.embeddings, self.prob, self.correctness_arr; overwrites
        self.pseudo_soft_label with current softmax outputs and the reliable
        entries of self.pseudo_hard_label with the model's predictions;
        decays self.threshold.
        """
        t = time.time()
        self.base_model.eval()

        split_criterion_values = torch.zeros(len(self.train_loader.dataset))
        correctness_arr = torch.zeros(len(self.train_loader.dataset), dtype=torch.bool)
        preds_arr = torch.zeros(len(self.train_loader.dataset), dtype=torch.long)
        # Embedding width depends on the backbone used for each benchmark.
        # NOTE(review): an unexpected num_class leaves `embeddings` undefined.
        if self.num_class == 10 or self.num_class == 100:  # CIFAR10 or CIFAR100
            embeddings = torch.zeros((len(self.train_loader.dataset), 512))
        elif self.num_class == 14:  # Clothing1M
            embeddings = torch.zeros((len(self.train_loader.dataset), 2048))
        elif self.num_class == 50:  # WebVision
            embeddings = torch.zeros((len(self.train_loader.dataset), 1536))
        print(embeddings.shape)
        
        with torch.no_grad():
            self.base_model.eval()
            for _, (inputs, targets, index, trues) in enumerate(self.train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.base_model(inputs)
                sm_outputs = F.softmax(outputs, dim=1)
                preds = sm_outputs.argmax(dim=1)
                # prediction entropy: -sum_c p_c * log p_c (the split criterion)
                batch_split_criterion_values = - (sm_outputs * torch.log_softmax(outputs, dim=1)).sum(dim=1)
                split_criterion_values[index] = batch_split_criterion_values.clone().detach().cpu()
                correctness_arr[index] = (preds.cpu() == trues).clone().detach().cpu()
                preds_arr[index] = preds.clone().detach().cpu()
                self.pseudo_soft_label[index] = sm_outputs.clone().detach().cpu()
                # flattened intermediate features, one vector per sample.
                # NOTE(review): last_k_layer_forward is a project-specific model
                # hook; semantics of k/lin/lout are assumed -- confirm against
                # the model implementation.
                embedding = torch.flatten(self.base_model.last_k_layer_forward(inputs, k=-2 ,lin=0, lout=5), start_dim=1, end_dim=-1)
                embeddings[index] = embedding.clone().detach().cpu()

        # min-max normalize the criterion to [0, 1] before GMM fitting
        split_criterion_values = (split_criterion_values - split_criterion_values.min())/(split_criterion_values.max() - split_criterion_values.min())    
        self.split_criterion_values_arr.append(split_criterion_values)
        # L2-normalize embeddings so nearest-neighbour distance ~ cosine distance
        embeddings = F.normalize(embeddings, dim=1)
        self.embeddings = copy.deepcopy(embeddings)

        if review: # average loss over last 5 epochs to improve convergence stability
            history = torch.stack(self.split_criterion_values_arr)
            split_criterion_values = history[-5:].mean(0)
            split_criterion_values = split_criterion_values.reshape(-1, 1)
        else:
            split_criterion_values = split_criterion_values.reshape(-1, 1)
        

        # fit a two-component GMM to the loss
        threshold = self.threshold
        gmm = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)
        gmm.fit(split_criterion_values)
        prob = gmm.predict_proba(split_criterion_values)
        # posterior probability of the low-entropy (presumed clean) component
        prob = prob[:, gmm.means_.argmin()]

        threshold = self.threshold
        pred = (prob > threshold)
        self.reliable_idx = pred.nonzero()[0].tolist()
        # Guarantee at least kth_min reliable samples by lowering the threshold
        # to the kth_min-th largest posterior.  kth_max is only used by the
        # commented-out upper cap below.
        kth_min, kth_max = 4000, 30000
        if len(self.reliable_idx) < kth_min:
            threshold = np.partition(prob, kth=len(self.train_loader.dataset)-kth_min)[len(self.train_loader.dataset)-kth_min]
            pred = (prob > threshold)

            self.reliable_idx = pred.nonzero()[0].tolist()
        # if len(self.reliable_idx) > kth_max:
        #     threshold = np.partition(prob, kth=len(self.train_loader.dataset)-kth_max)[len(self.train_loader.dataset)-kth_max]
        #     pred = (prob > threshold)
        #     self.reliable_idx = pred.nonzero()[0].tolist()

        # everything not reliable is unreliable (self.all_idx set in set_basic_config)
        self.unreliable_idx = list(set(self.all_idx) - set(self.reliable_idx))

        # reliable samples adopt the model's current predictions as hard labels
        self.pseudo_hard_label[self.reliable_idx] = self.idx_to_hard(preds_arr[self.reliable_idx])
        self.correctness_arr = copy.deepcopy(correctness_arr)
        self.prob = copy.deepcopy(prob)
        # decay the threshold, but never below the smallest posterior + 1e-2
        self.threshold = max(self.threshold - self.delta, prob.min() + 1e-2)

        if printlog:
            print("Current threshold: {}. ".format(threshold))
            train_acc = torch.sum(correctness_arr) / len(correctness_arr)
            reliable_acc = torch.sum(correctness_arr[self.reliable_idx]) / len(correctness_arr[self.reliable_idx])
            unreliable_acc = torch.sum(correctness_arr[self.unreliable_idx]) / len(correctness_arr[self.unreliable_idx])
            print("Train acc: {}.".format(train_acc))
            print("Train reliable: {} samples. Accuracy: {}. ".format(len(self.reliable_idx), reliable_acc))
            print("Train unreliable: {} samples. Accuracy: {}. ".format(len(self.unreliable_idx), unreliable_acc))
            print("split_train Use {} seconds. ".format(time.time()-t))

    def select_unreliable_subset(self, printlog=True):
        """Select an unreliable subset via cross nearest-neighbour matching.

        For every reliable sample, its 1-NN among the unreliable embeddings
        joins the unreliable subset (with duplicates; self.unreliable_sub_idx
        therefore has one entry per reliable sample).  Symmetrically, for each
        unique subset member its 1-NN among the reliable embeddings forms
        self.reliable_sub_idx.
        """
        t = time.time()
        embeddings = copy.deepcopy(self.embeddings)
        reliable_embeddings, unreliable_embeddings = embeddings[self.reliable_idx,:], embeddings[self.unreliable_idx,:]

        t1 = time.time()
        from sklearn.neighbors import NearestNeighbors
        neigh = NearestNeighbors(n_neighbors=1)
        neigh.fit(unreliable_embeddings)
        distance, neighbor_idx = neigh.kneighbors(reliable_embeddings)
        distance, neighbor_idx = distance.flatten(), neighbor_idx.flatten()
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))

        # map neighbour positions back to dataset-level indices
        self.unreliable_sub_idx = [ self.unreliable_idx[i] for i in neighbor_idx ]

        self.unreliable_sub_idx_unique = list(set(self.unreliable_sub_idx))
        unreliable_sub_embeddings = embeddings[self.unreliable_sub_idx_unique,:]
        
        self.embeddings = copy.deepcopy(embeddings)

        t1 = time.time()
        from sklearn.neighbors import NearestNeighbors
        neigh_2 = NearestNeighbors(n_neighbors=1)
        neigh_2.fit(reliable_embeddings)
        distance_2, neighbor_idx_2 = neigh_2.kneighbors(unreliable_sub_embeddings)
        distance_2, neighbor_idx_2 = distance_2.flatten(), neighbor_idx_2.flatten()
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))

        self.reliable_sub_idx = [ self.reliable_idx[i] for i in neighbor_idx_2 ]

        if printlog:
            unreliable_sub_acc = torch.sum(self.correctness_arr[self.unreliable_sub_idx_unique]) / len(self.correctness_arr[self.unreliable_sub_idx_unique])
            print("Train unreliable subset: {} samples. Accuracy: {}. ".format(len(self.unreliable_sub_idx_unique), unreliable_sub_acc))
            self.reliable_sub_idx_unique = list(set(self.reliable_sub_idx))
            reliable_sub_acc = torch.sum(self.correctness_arr[self.reliable_sub_idx_unique]) / len(self.correctness_arr[self.reliable_sub_idx_unique])
            print("Train reliable subset: {} samples. Accuracy: {}. ".format(len(self.reliable_sub_idx_unique), reliable_sub_acc))
            print("select_unreliable_subset Use {} seconds. ".format(time.time()-t))

    def knn_pseudo_label_generator(self, printlog=True):
        """Relabel the unreliable subset with a 3-NN classifier fitted on the
        reliable embeddings and their current hard pseudo labels."""
        from sklearn.neighbors import KNeighborsClassifier
        pseudo_label_generator = KNeighborsClassifier(n_neighbors=3)
        pseudo_label_generator.fit(self.embeddings[self.reliable_idx], self.hard_to_idx(self.pseudo_hard_label)[self.reliable_idx].tolist())
        pseudo_y_unreliable_subset = pseudo_label_generator.predict(self.embeddings[self.unreliable_sub_idx])
        self.pseudo_hard_label[self.unreliable_sub_idx] = self.idx_to_hard(torch.LongTensor(pseudo_y_unreliable_subset))
        if printlog:
            pseudo_y_unreliable_subset = pseudo_y_unreliable_subset.tolist()
            true_y_unreliable_subset = self.train_loader.dataset.train_labels_gt[self.unreliable_sub_idx]
            pseudo_acc_unreliable_subset = (pseudo_y_unreliable_subset == true_y_unreliable_subset).sum() / len(true_y_unreliable_subset)
            print("After KNN, accuracy of unreliable subset: {}. ".format(pseudo_acc_unreliable_subset))

    def finetune_pseudo_label_generator(self, optimizer=None, printlog=False):
        """Fine-tune a copy of the base model with consistency losses and use
        it to refresh soft pseudo labels of the unreliable subset.

        Args:
            optimizer: optional optimizer whose state/lr is copied (lr is
                divided by args['finetune_eps']).
            printlog: print subset accuracies after each finetune epoch.
        """
        self.pl_model = copy.deepcopy(self.base_model)
        self.pl_model.train()
        optim = torch.optim.SGD(self.pl_model.parameters(), lr=self.args['finetune_lr'], weight_decay=self.args['finetune_wd'], momentum=0.9)
        if optimizer!=None:
            # inherit momentum buffers etc. from the caller's optimizer, then
            # scale down the learning rate for fine-tuning
            optim.load_state_dict(optimizer.state_dict())
            for param_group_1, param_group_2 in zip(optim.param_groups, optimizer.param_groups):
                param_group_1["lr"] = param_group_2["lr"] / self.args['finetune_eps']
        # optim = torch.optim.Adam(self.pl_model.parameters(), lr=self.args.finetune_lr, weight_decay=self.args.finetune_wd)
        # optim = torch.optim.SGD(self.pl_model.parameters(), lr=self.args.finetune_lr, weight_decay=self.args.finetune_wd)
        # fc_params_id = list(map(id, self.pl_model.linear.parameters()))     # returns the memory addresses of the parameters
        # base_params = filter(lambda p: id(p) not in fc_params_id, self.pl_model.parameters())
        # optim = torch.optim.SGD([
        #     {'params': base_params, 'lr': self.args['finetune_lr'] },   # 0
        #     {'params': self.pl_model.linear.parameters(), 'lr': self.args['finetune_lr'] }], weight_decay=self.args['finetune_wd'], momentum=0.9)
        
        self.base_model.eval()

        # pick the dataset-specific finetune loader (defined later in this file)
        if self.num_class == 10 or self.num_class == 100:  # CIFAR10 or CIFAR100
            loader = cifar_dataloader_finetune(self.train_loader, self.reliable_idx, self.unreliable_idx, self.unreliable_sub_idx)
        elif self.num_class == 50:  # WebVision
            loader = webvision_dataloader_finetune(self.train_loader, self.reliable_idx, self.unreliable_idx, self.unreliable_sub_idx)
        # loader = cifar_dataloader_finetune(self.train_loader, self.reliable_sub_idx, self.unreliable_idx, self.unreliable_sub_idx_unique)
        labeled_loader = loader.run('labeled')
        # unlabeled_loader = loader.run('unlabeled')
        # unlabeled_iter = iter(unlabeled_loader)
        print(len(labeled_loader))

        # GMM clean-posterior per reliable sample, used as label-mixing weight
        w_x_reli = torch.FloatTensor([self.prob[i] for i in self.reliable_idx])

        criterion = ConsistencyLoss()
        criterion2 = wce
        for epoch in range(0, self.args['finetune_eps']):
            t1 = time.time()
            for batch_idx, (img_reli_list, img_unreli, targets, trues, index) in enumerate(labeled_loader):
                # try:
                #     img_unlabeled = unlabeled_iter.next()
                # except:
                #     unlabeled_iter = iter(unlabeled_loader)
                #     img_unlabeled = unlabeled_iter.next()

                batch_size = img_reli_list[0].size(0)
                # one-hot encode the (noisy) targets of the reliable samples
                labels_x = torch.zeros(batch_size, self.num_class).scatter_(1, targets.view(-1,1), 1) 
                w_x = w_x_reli[index].view(-1,1).type(torch.FloatTensor) 

                img_reli_list = list(map(lambda x: x.to(self.device), img_reli_list))
                img_unreli, labels_x, w_x = map(lambda x: x.to(self.device), (img_unreli, labels_x, w_x))

                # consistency
                logits_reli_list = list(map(lambda x: self.pl_model(x), img_reli_list))
                                        
                with torch.no_grad():
                    # label co-refinement: mix given labels with the frozen
                    # base model's averaged predictions over the augmented views
                    logits_reli_f_list = list(map(lambda x: self.base_model(x), img_reli_list))
                    px_list = list(map(lambda x: torch.softmax(x, dim=1), logits_reli_f_list))
                    px = sum(px_list) / len(px_list)
                    px = (1 - w_x) * labels_x + w_x * px              
                    ptx = px ** (1 / self.args['T']) # temparature sharpening 
                            
                    targets_reli_f = ptx / ptx.sum(dim=1, keepdim=True) # normalize           
                    targets_reli_f = targets_reli_f.clone().detach() 

                logits_unreli = self.pl_model(img_unreli)

                # Lc_1 = sum(list(map(lambda x: criterion(x.detach().clone(), logits_unreli), logits_reli_list))) / len(logits_reli_list)
                # Lc_2 = sum(list(map(lambda x: criterion2(x, targets_reli_f), logits_reli_list))) / len(logits_reli_list)
                # Lc = Lc_1 + Lc_2
                
                # Lc_1: pull unreliable predictions toward (detached) reliable ones
                Lc_1 = sum(list(map(lambda x: criterion(x.detach().clone(), logits_unreli), logits_reli_list))) / len(logits_reli_list)
                # Lc_2 = sum(list(map(lambda x: criterion2(x, targets_reli_f), logits_reli_list))) / len(logits_reli_list)
                # Lc_2: soft cross entropy of the first view against refined targets
                Lc_2 = criterion2(logits_reli_list[0], targets_reli_f)
                # Lc_3: consistency of augmented views with the (detached) first view
                Lc_3 = sum(list(map(lambda x: criterion(logits_reli_list[0].detach().clone(), x), logits_reli_list[1:]))) / len(logits_reli_list[1:])
                # Lc_2 = sum(list(map(lambda x: criterion(targets_reli_f, x), logits_reli_list))) / len(logits_reli_list)
                Lc = Lc_1 + Lc_2 + Lc_3

                loss =  Lc
                # compute gradient and do SGD step
                optim.zero_grad()
                loss.backward()
                optim.step()

                # EMA-style update of the unreliable subset's soft labels
                batch_unreli_idx = torch.LongTensor(self.unreliable_sub_idx)[index]
                self.pseudo_soft_label[batch_unreli_idx] = 0.3 * self.pseudo_soft_label[batch_unreli_idx] + 0.7 * F.softmax(logits_unreli, dim=1).clone().detach().cpu()

            print("Finetune Epoch {} Use {} seconds. ".format(epoch, time.time()-t1))

            # pseudo_y_unreliable_subset = torch.zeros((len(self.unreliable_sub_idx),), dtype=torch.long)
            # pseudo_y_soft = torch.zeros((len(self.unreliable_sub_idx), self.num_class))
            # pseudo_y_unreliable_subset = torch.zeros((len(self.unreliable_sub_idx_unique),), dtype=torch.long)
            # pseudo_y_soft = torch.zeros((len(self.unreliable_sub_idx_unique), self.num_class))

            # with torch.no_grad():
            #     for _, img_unreli, _, _, index in labeled_loader:
            #         img_unreli = img_unreli.to(self.device)
            #         outputs = self.pl_model(img_unreli)
            #         sm_outputs = F.softmax(outputs, dim=1)
            #         preds = sm_outputs.argmax(dim=1)
            #         pseudo_y_unreliable_subset[index] = preds.cpu().clone().detach()
            #         pseudo_y_soft[index] = outputs.cpu().clone().detach()
            # re-derive hard labels as argmax of the updated soft labels
            self.pseudo_hard_label = self.idx_to_hard(self.hard_to_idx(self.pseudo_soft_label))
            # self.pseudo_soft_label[self.unreliable_sub_idx] = pseudo_y_soft.clone().detach()
            # self.pseudo_hard_label[self.unreliable_sub_idx_unique] = self.idx_to_hard(pseudo_y_unreliable_subset)
            # self.pseudo_soft_label[self.unreliable_sub_idx_unique] = pseudo_y_soft.clone().detach()

            if printlog:
                pseudo_y_unreliable_subset_unique = self.hard_to_idx(self.pseudo_hard_label[self.unreliable_sub_idx_unique])
                true_y_unreliable_subset = self.train_loader.dataset.train_labels_gt[self.unreliable_sub_idx_unique]
                pseudo_acc_unreliable_subset = (pseudo_y_unreliable_subset_unique.tolist() == true_y_unreliable_subset).sum() / len(true_y_unreliable_subset)
                print("After fine-tuning epoch {}, accuracy of unreliable subset: {}. ".format(epoch, pseudo_acc_unreliable_subset))
                pseudo_soft_label_idx = self.hard_to_idx(self.pseudo_soft_label)
                pseudo_soft_label_acc = (pseudo_soft_label_idx.tolist() == self.train_loader.dataset.train_labels_gt).sum() / len(self.train_loader.dataset.train_labels_gt)
                print("After updating soft labels, accuracy of soft labels: {}. ".format(pseudo_soft_label_acc))

    def set_basic_config(self, base_model, train_loader):
        """Install (deep copies of) the base model and training loader."""
        self.base_model = copy.deepcopy(base_model)
        self.train_loader = copy.deepcopy(train_loader)
        # all dataset indices; used by split_train to derive unreliable_idx
        self.all_idx = [ i for i in range(0, len(self.train_loader.dataset))]

    def initialize_pseudolabeling(self):
        """Initialize pseudo labels (hard and soft) from the dataset's noisy labels."""
        # pseudo labeler
        self.pl_model = copy.deepcopy(self.base_model)
        # pseudo labels; train_labels is a list for CIFAR, a dict for
        # Clothing1M / WebVision
        if self.num_class == 10 or self.num_class == 100:  # CIFAR10 or CIFAR100
            self.pseudo_hard_label = self.idx_to_hard(torch.LongTensor(self.train_loader.dataset.train_labels))
        elif self.num_class == 14 or self.num_class == 50:  # Clothing1M or WebVision
            self.pseudo_hard_label = self.idx_to_hard(torch.LongTensor(list(self.train_loader.dataset.train_labels.values())))
        
        # soft labels start as a copy of the one-hot hard labels
        self.pseudo_soft_label = copy.deepcopy(self.pseudo_hard_label)
        

    def update_soft_label(self, momentum=0.1, printlog=True):
        """Blend current hard labels into the soft labels with weight `momentum`."""
        self.pseudo_soft_label = momentum * self.pseudo_hard_label + (1 - momentum) * self.pseudo_soft_label 
        if printlog:
            pseudo_soft_label_idx = self.hard_to_idx(self.pseudo_soft_label)
            pseudo_soft_label_acc = (pseudo_soft_label_idx.tolist() == self.train_loader.dataset.train_labels_gt).sum() / len(self.train_loader.dataset.train_labels_gt)
            print("After updating soft labels, accuracy of soft labels: {}. ".format(pseudo_soft_label_acc))

    def soft_to_hard(self, x):
        """One-hot encode the argmax of soft labels x (N, C) -> (N, C).

        NOTE(review): uses .cuda() directly, unlike the rest of the class
        which uses self.device -- confirm this is intended.
        """
        with torch.no_grad():
            return (torch.zeros(len(x), self.num_class)).cuda().scatter_(1, (x.argmax(dim=1)).view(-1,1), 1)
    
    def idx_to_hard(self, x):
        """One-hot encode class indices x (N,) -> (N, num_class)."""
        with torch.no_grad():
            return torch.zeros(len(x), self.num_class).scatter_(1, x.view(-1,1), 1)
    
    def hard_to_idx(self, x):
        """Class indices from one-hot / soft labels: argmax over dim 1."""
        return torch.argmax(x, dim=1, keepdim=False)
        

class cifar_dataset_for_finetune(Dataset):
    """Finetuning dataset for CIFAR.

    In 'labeled' mode, item i pairs the i-th reliable image (three augmented
    views: original, weak, strong) with the i-th unreliable-subset image, plus
    the reliable sample's noisy target, true label, and the index i.
    In 'unlabeled' mode it yields the remaining unreliable images unaugmented.
    """
    def __init__(self, train_dataset, reliable_idx, unreliable_idx, unreliable_sub_idx, mode):
        self.train_dataset = train_dataset
        self.train_data = train_dataset.train_data
        self.noise_label = train_dataset.train_labels
        self.true_label = train_dataset.train_labels_gt

        # CIFAR normalization statistics, shared by all three pipelines.
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        self.ori_transform = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        self.weak_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        self.strong_transform = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])

        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx
        # unreliable samples not chosen for the subset form the unlabeled pool
        self.unlabeled_idx = list(set(unreliable_idx) - set(unreliable_sub_idx))
        self.mode = mode

        if mode == 'labeled':
            self.train_data_reliable = self.train_data[self.reliable_idx]
            self.train_data_unreliable = self.train_data[self.unreliable_sub_idx]
            self.noise_label_reliable = self.noise_label[self.reliable_idx]
            self.true_label_reliable = self.true_label[self.reliable_idx]
        if mode == 'unlabeled':
            self.train_data_unlabeled = self.train_data[self.unlabeled_idx]
            self.true_label_unlabeled = self.true_label[self.unlabeled_idx]

    def __getitem__(self, index):
        if self.mode == 'labeled':
            img_reli = Image.fromarray(self.train_data_reliable[index])
            img_unreli = Image.fromarray(self.train_data_unreliable[index])
            views = (
                self.ori_transform(img_reli),
                self.weak_transform(img_reli),
                self.strong_transform(img_reli),
            )
            target = self.noise_label_reliable[index]
            true_label = self.true_label_reliable[index]
            return views, self.ori_transform(img_unreli), target, true_label, index
        elif self.mode == 'unlabeled':
            img = Image.fromarray(self.train_data_unlabeled[index])
            return self.ori_transform(img)

    def __len__(self):
        if self.mode == 'labeled':
            return len(self.train_data_reliable)
        elif self.mode == 'unlabeled':
            return len(self.train_data_unlabeled)

# cifar finetune dataset
class cifar_dataloader_finetune:
    def __init__(self, train_loader, reliable_idx, unreliable_idx, unreliable_sub_idx):
        self.dataset = train_loader.dataset
        self.batch_size = train_loader.batch_size
        self.num_workers = train_loader.num_workers
        self.reliable_idx = reliable_idx
        self.unreliable_idx = unreliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def run(self, mode):
        if mode == 'labeled':
            labeled_dataset = cifar_dataset_for_finetune(self.dataset, self.reliable_idx, self.unreliable_idx, self.unreliable_sub_idx, mode=mode)                
            labeled_loader = DataLoader(
                dataset=labeled_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers)             
            return labeled_loader

        if mode == 'unlabeled':
            unlabeled_dataset = cifar_dataset_for_finetune(self.dataset, self.reliable_idx, self.unreliable_idx, self.unreliable_sub_idx, mode=mode)                
            unlabeled_loader = DataLoader(
                dataset=unlabeled_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers)             
            return unlabeled_loader

class webvision_dataset_for_finetune(Dataset):
    """Finetuning dataset for WebVision; images are loaded from disk under `root`.

    In 'labeled' mode, item i pairs the i-th reliable image (three views:
    center-crop, weak, strong) with the i-th unreliable-subset image, plus the
    reliable sample's noisy target, true label, and the index i.  In
    'unlabeled' mode it yields the remaining unreliable images center-cropped.
    """
    def __init__(self, train_dataset, reliable_idx, unreliable_idx, unreliable_sub_idx, mode, root):
        self.train_dataset = train_dataset
        # train_labels maps relative image path -> noisy label; WebVision has
        # no ground truth here, so true_label mirrors the noisy labels.
        self.train_data = np.array(list(train_dataset.train_labels.keys()))
        self.noise_label = np.array(list(train_dataset.train_labels.values()))
        self.true_label = np.array(list(train_dataset.train_labels.values()))

        # ImageNet normalization statistics, shared by all three pipelines.
        normalize = transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225))
        self.ori_transform = transforms.Compose([
                transforms.CenterCrop(227),
                transforms.ToTensor(),
                normalize,
            ])
        self.weak_transform = transforms.Compose([
                transforms.RandomCrop(227),
                transforms.ToTensor(),
                normalize,
            ])
        self.strong_transform = transforms.Compose([
                transforms.RandomCrop(227),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])

        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx
        # unreliable samples not chosen for the subset form the unlabeled pool
        self.unlabeled_idx = list(set(unreliable_idx) - set(unreliable_sub_idx))
        self.mode = mode
        self.root = root

        if mode == 'labeled':
            self.train_data_reliable = self.train_data[self.reliable_idx]
            self.train_data_unreliable = self.train_data[self.unreliable_sub_idx]
            self.noise_label_reliable = self.noise_label[self.reliable_idx]
            self.true_label_reliable = self.true_label[self.reliable_idx]
        if mode == 'unlabeled':
            self.train_data_unlabeled = self.train_data[self.unlabeled_idx]
            self.true_label_unlabeled = self.true_label[self.unlabeled_idx]

    def _load(self, rel_path):
        """Open root + rel_path from disk as an RGB PIL image."""
        return Image.open(self.root + rel_path).convert('RGB')

    def __getitem__(self, index):
        if self.mode == 'labeled':
            img_reli = self._load(self.train_data_reliable[index])
            img_unreli = self._load(self.train_data_unreliable[index])
            views = (
                self.ori_transform(img_reli),
                self.weak_transform(img_reli),
                self.strong_transform(img_reli),
            )
            target = self.noise_label_reliable[index]
            true_label = self.true_label_reliable[index]
            return views, self.ori_transform(img_unreli), target, true_label, index
        elif self.mode == 'unlabeled':
            img = self._load(self.train_data_unlabeled[index])
            return self.ori_transform(img)

    def __len__(self):
        if self.mode == 'labeled':
            return len(self.train_data_reliable)
        elif self.mode == 'unlabeled':
            return len(self.train_data_unlabeled)

# webvision finetune dataset
class webvision_dataloader_finetune:
    """Factory for WebVision finetuning DataLoaders, mirroring an existing
    train_loader's batch size, worker count, and image root directory."""

    def __init__(self, train_loader, reliable_idx, unreliable_idx, unreliable_sub_idx):
        self.dataset = train_loader.dataset
        self.batch_size = train_loader.batch_size
        self.num_workers = train_loader.num_workers
        self.reliable_idx = reliable_idx
        self.unreliable_idx = unreliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx
        self.root = train_loader.dataset.root

    def run(self, mode):
        """Return a shuffled DataLoader for mode 'labeled' or 'unlabeled'."""
        # both modes build the same dataset; only its mode flag differs
        if mode in ('labeled', 'unlabeled'):
            finetune_dataset = webvision_dataset_for_finetune(
                self.dataset,
                self.reliable_idx,
                self.unreliable_idx,
                self.unreliable_sub_idx,
                mode=mode,
                root=self.root)
            return DataLoader(
                dataset=finetune_dataset,
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers)

# finetune loss
def linear_rampup(current, warm_up, lambda_u=25, rampup_length=16):
    """Linearly ramp the loss weight from 0 to lambda_u over `rampup_length`
    epochs, starting after `warm_up` epochs."""
    progress = (current - warm_up) / rampup_length
    return lambda_u * float(np.clip(progress, 0.0, 1.0))

class SemiLoss(object):
    """MixMatch-style semi-supervised loss.

    Returns (Lx, Lu, w): soft cross entropy on labeled outputs, mean squared
    error between unlabeled probabilities and their targets, and the linear
    ramp-up weight for the unlabeled term.
    """
    def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, warm_up):
        # supervised term: cross entropy against soft targets
        log_px = F.log_softmax(outputs_x, dim=1)
        Lx = -torch.mean(torch.sum(log_px * targets_x, dim=1))

        # unsupervised term: L2 between predicted probabilities and targets
        probs_u = torch.softmax(outputs_u, dim=1)
        Lu = torch.mean((probs_u - targets_u) ** 2)

        return Lx, Lu, linear_rampup(epoch, warm_up)
    
class ConsistencyLoss(object):
    """KL divergence KL(softmax(output1) || softmax(output2)), averaged over
    the batch; gradients flow only through output2 (output1 is detached)."""
    def __call__(self, output1, output2):
        target = F.softmax(output1, dim=1).detach()
        log_pred = F.log_softmax(output2, dim=1)
        per_sample = F.kl_div(log_pred, target, reduction='none').sum(dim=1)
        return per_sample.mean()

class ConsistencyLoss2(object):
    """Per-sample squared L2 distance between the two softmax distributions,
    averaged over the batch; output1 is detached (reference distribution)."""
    def __call__(self, output1, output2):
        p_ref = F.softmax(output1, dim=1).detach()
        p_cur = F.softmax(output2, dim=1)
        return ((p_ref - p_cur) ** 2).sum(dim=1).mean()
    
def wce(outputs, weights):
    """Soft-target cross entropy: -mean_i sum_c weights[i, c] * log_softmax(outputs)[i, c]."""
    log_probs = F.log_softmax(outputs, dim=1)
    return -(weights * log_probs).sum(dim=1).mean()
 
    