import numpy as np
import torch
from tqdm import tqdm
from typing import List
from torchvision.utils import make_grid

# + dynamically change
from base import BaseTrainer_pl_batch_stand as BaseTrainer
# - dynamically change

from utils import inf_loop, linear_rampup, sigmoid_rampup, linear_rampdown
import sys
import torch.nn.functional as F
import warnings
import copy
from torch.autograd import Variable
warnings.filterwarnings("ignore", category=DeprecationWarning)
import functools

# + Boosting Pseudo Labels Functions and Classes
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from sklearn.mixture import GaussianMixture
from PIL import Image
import random
import time
import math
import os

from global_var import *

class KNN_solver:
    """Chunked nearest-neighbor search between two embedding matrices.

    Distances between row sets A and B are computed block-by-block on
    `device` so the full (|A| x |B|) distance matrix never has to live on
    the accelerator at once; results are accumulated on the CPU.
    """

    def __init__(self, A, B, device) -> None:
        # A: query embeddings (n_A, d); B: reference embeddings (n_B, d)
        self.A = A
        self.B = B
        self.device = device

    def compute_distances_tensor(self, A, B, device):
        """Return the (m, n) Euclidean distance matrix between rows of A and B."""
        A = A.to(device)
        B = B.to(device)
        m = A.shape[0]
        n = B.shape[0]
        M = A @ B.T
        H = torch.square(A).sum(dim=1, keepdim=True).repeat(1, n)
        K = torch.square(B).sum(dim=1, keepdim=True).T.repeat(m, 1)
        # ||a-b||^2 = ||a||^2 - 2<a,b> + ||b||^2; clamp guards against tiny
        # negative values from floating-point round-off, which would
        # otherwise yield NaN under sqrt.
        return torch.sqrt(torch.clamp(-2 * M + H + K, min=0))

    def nearest_neighbor(self, num_subset_A=10000, num_subset_B=10000):
        """For each row of A, return the index (into B) of its nearest row.

        :param num_subset_A: chunk size along A
        :param num_subset_B: chunk size along B
        :return: numpy int array of shape (|A|,)
        """
        def gen_start_end(num_set, num_subset):
            # Yield (start, end) pairs covering [0, num_set) in chunks.
            len_gen = (num_set // num_subset) if (num_set % num_subset == 0) else (num_set // num_subset + 1)
            for i in range(len_gen):
                start_idx = i * num_subset
                end_idx = min((i + 1) * num_subset, num_set)
                yield start_idx, end_idx

        num_set_A, num_set_B = self.A.shape[0], self.B.shape[0]

        C = torch.zeros((num_set_A, num_set_B), dtype=torch.float32)
        for A_start_idx, A_end_idx in gen_start_end(num_set_A, num_subset_A):
            # BUGFIX: the B-chunk generator must be rebuilt for every A chunk.
            # The original created it once outside the loop, so it was
            # exhausted after the first A chunk and every remaining row of C
            # stayed zero, corrupting the argmin below.
            for B_start_idx, B_end_idx in gen_start_end(num_set_B, num_subset_B):
                A_sub = self.A[A_start_idx:A_end_idx]
                B_sub = self.B[B_start_idx:B_end_idx]
                C_sub = self.compute_distances_tensor(A_sub, B_sub, self.device)
                C[A_start_idx:A_end_idx, B_start_idx:B_end_idx] = C_sub.clone().detach().cpu()

        nearest_idx = C.argmin(dim=1).numpy()
        return nearest_idx
    
def nn_mixup_two_targets(x1, x2, y1, y2, alpha=1.0, device='cuda', is_bias=False):
    """Mix two inputs with a Beta-sampled coefficient.

    Returns (mixed_x, y1, y2, lam) where mixed_x = lam*x1 + (1-lam)*x2.
    With alpha <= 0 no mixing happens (lam = 1). When is_bias is set, lam is
    biased toward x1 by taking max(lam, 1-lam).
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    if is_bias:
        lam = max(lam, 1 - lam)

    mixed_x = lam * x1 + (1 - lam) * x2
    return mixed_x, y1, y2, lam

def mixup_ce_loss_with_softmax(preds, targets_a, targets_b, lam):
    """Mixed soft cross-entropy: both targets are logits, softened with
    softmax, and the two cross-entropies are blended with weight `lam`."""
    log_probs = F.log_softmax(preds, dim=1)
    loss_a = -(F.softmax(targets_a, 1) * log_probs).sum(dim=1).mean()
    loss_b = -(F.softmax(targets_b, 1) * log_probs).sum(dim=1).mean()

    return lam * loss_a + (1 - lam) * loss_b

def compute_distances_tensor(A, B, device='cpu'):
    """Compute the (m, n) pairwise Euclidean distance matrix between the
    rows of A and the rows of B on `device`.

    Uses the expansion ||a-b||^2 = ||a||^2 - 2<a,b> + ||b||^2.
    (Leftover debug prints were removed — they ran on every call.)
    """
    A = A.to(device)
    B = B.to(device)
    m = A.shape[0]
    n = B.shape[0]
    M = A @ B.T
    H = torch.square(A).sum(dim=1, keepdim=True).repeat(1, n)
    K = torch.square(B).sum(dim=1, keepdim=True).T.repeat(m, 1)
    # clamp: round-off can make the quadratic form slightly negative, which
    # would produce NaN under sqrt (e.g. on the A==B diagonal).
    return torch.sqrt(torch.clamp(-2 * M + H + K, min=0))


def get_bpl_idx(
        embeddings,
        outputs,
        threshold=0.5,
        min_ratio=0.1,
        max_ratio=0.8,
        random_ratio=1,
        random_mode=False,
        knn_device='cpu',
        max_num=math.inf
):
    """Split samples into reliable / unreliable sets by prediction entropy
    and pair each reliable sample with its nearest unreliable neighbor.

    :param embeddings: (N, d) tensor of per-sample embeddings
    :param outputs: (N, C) tensor of model logits (ignored in random_mode)
    :param threshold: GMM posterior threshold for the low-entropy component
    :param min_ratio: lower bound on the reliable fraction
    :param max_ratio: upper bound on the reliable fraction
    :param random_ratio: fraction of each split kept before the KNN step
    :param random_mode: if True, draw both index lists uniformly at random
    :param knn_device: device used for the chunked KNN distance computation
    :param max_num: hard cap on the size of each subsampled split
    :return: (reliable_idx, unreliable_sub_idx); in the non-random path the
        i-th entry of unreliable_sub_idx is the unreliable sample nearest to
        the i-th reliable sample.
    """
    n_samples = len(embeddings)
    all_idx = [i for i in range(0, n_samples)]

    if random_mode:
        # Ablation path: both splits are uniform random samples of equal size.
        k = int(len(all_idx) * random.uniform(a=min_ratio, b=max_ratio))
        reliable_idx = random.sample(all_idx, k=k)
        unreliable_sub_idx = random.sample(all_idx, k=k)
    else:
        # Per-sample prediction entropy. log_softmax replaces log(softmax(..))
        # for numerical stability: an underflowed probability made log() hit
        # -inf and 0*(-inf) produced NaN in the original.
        split_criterion_values = - (F.softmax(outputs, dim=1) * F.log_softmax(outputs, dim=1)).sum(dim=1)

        # min-max normalize the entropy to [0, 1]
        split_criterion_values = (split_criterion_values - split_criterion_values.min())/(split_criterion_values.max() - split_criterion_values.min())
        split_criterion_values = split_criterion_values.reshape(-1, 1)
        # fit a two-component GMM to the normalized entropy; the component
        # with the smaller mean models the "reliable" (low-entropy) samples
        gmm = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)
        gmm.fit(split_criterion_values)
        prob = gmm.predict_proba(split_criterion_values)
        prob = prob[:, gmm.means_.argmin()]

        pred = (prob > threshold)
        reliable_idx = pred.nonzero()[0].tolist()
        # keep the reliable count within [kth_min, kth_max] by re-deriving the
        # threshold from the k-th order statistic of the posteriors
        kth_min, kth_max = int(n_samples * min_ratio), int(n_samples * max_ratio)
        if len(reliable_idx) < kth_min:
            threshold = np.partition(prob, kth=n_samples - kth_min)[n_samples - kth_min]
            pred = (prob > threshold)
            reliable_idx = pred.nonzero()[0].tolist()
        if len(reliable_idx) > kth_max:
            threshold = np.partition(prob, kth=n_samples - kth_max)[n_samples - kth_max]
            pred = (prob > threshold)
            reliable_idx = pred.nonzero()[0].tolist()

        unreliable_idx = list(set(all_idx) - set(reliable_idx))

        # subsample both splits to bound the cost of the KNN step
        unreliable_idx = random.sample(unreliable_idx, k=min(max_num, int(len(unreliable_idx)*random_ratio)))
        unreliable_embeddings = embeddings[unreliable_idx,:]

        reliable_idx = random.sample(reliable_idx, k=min(max_num, int(len(reliable_idx)*random_ratio)))
        reliable_embeddings = embeddings[reliable_idx,:]
        print("threshold: {}".format(threshold))
        print("reliable num: {}".format(len(reliable_embeddings)))
        # BUGFIX: report the unreliable count (the original printed the
        # reliable count twice).
        print("unreliable num: {}".format(len(unreliable_embeddings)))

        # For each reliable sample, find the nearest unreliable sample using
        # the chunked GPU/CPU KNN solver. (A slower sklearn NearestNeighbors
        # CPU implementation was used previously; converting the embeddings
        # to half precision is a possible memory optimization, currently off.)
        t1 = time.time()
        with torch.no_grad():
            solver = KNN_solver(reliable_embeddings, unreliable_embeddings, knn_device)
            neighbor_idx = solver.nearest_neighbor(5000, 5000)
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))

        # map chunk-local neighbor positions back to dataset-level indices
        unreliable_sub_idx = [unreliable_idx[i] for i in neighbor_idx]

    return reliable_idx, unreliable_sub_idx

class bpl_dataset(Dataset):
    """Pairs of (reliable, unreliable) images drawn from a parent in-memory
    training dataset (CIFAR-style raw arrays); the index lists are refreshed
    each epoch via update_idx."""

    def __init__(self, train_dataset):
        # Share the parent's raw image array and transform pipeline.
        self.train_data = train_dataset.train_data
        self.transform = train_dataset.transform
        # Placeholder indices until the first update_idx call.
        self.reliable_idx = [0]
        self.unreliable_sub_idx = [0]

    def update_idx(self, reliable_idx, unreliable_sub_idx):
        """Install freshly computed reliable / unreliable index lists."""
        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def __getitem__(self, index):
        idx_r = self.reliable_idx[index]
        idx_u = self.unreliable_sub_idx[index]
        img_r = Image.fromarray(self.train_data[idx_r])
        img_u = Image.fromarray(self.train_data[idx_u])
        if self.transform is not None:
            img_r = self.transform(img_r)
            img_u = self.transform(img_u)
        return img_r, img_u, index, idx_r, idx_u

    def __len__(self):
        return len(self.reliable_idx)
        
class bpl_dataset_for_clothing1m(Dataset):
    """Pairs of (reliable, unreliable) Clothing1M images loaded from disk;
    the parent dataset stores (id, path) tuples in train_imgs. Index lists
    are refreshed each epoch via update_idx."""

    def __init__(self, train_dataset):
        # Share the parent's (id, path) list and transform pipeline.
        self.train_data = train_dataset.train_imgs
        self.transform = train_dataset.transform
        # Placeholder indices until the first update_idx call.
        self.reliable_idx = [0]
        self.unreliable_sub_idx = [0]

    def update_idx(self, reliable_idx, unreliable_sub_idx):
        """Install freshly computed reliable / unreliable index lists."""
        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def __getitem__(self, index):
        idx_r = self.reliable_idx[index]
        idx_u = self.unreliable_sub_idx[index]
        _, path_r = self.train_data[idx_r]
        _, path_u = self.train_data[idx_u]
        img_r = Image.open(path_r).convert('RGB')
        img_u = Image.open(path_u).convert('RGB')
        if self.transform is not None:
            img_r = self.transform(img_r)
            img_u = self.transform(img_u)
        return img_r, img_u, index, idx_r, idx_u

    def __len__(self):
        return len(self.reliable_idx)

class bpl_dataset_for_webvision(Dataset):
    """Pairs of (reliable, unreliable) WebVision images; the parent dataset
    stores relative paths in train_imgs, resolved against its root. Index
    lists are refreshed each epoch via update_idx."""

    def __init__(self, train_dataset):
        # Share the parent's relative-path list, transform, and image root.
        self.train_data = train_dataset.train_imgs
        self.transform = train_dataset.transform
        self.root = train_dataset.root
        # Placeholder indices until the first update_idx call.
        self.reliable_idx = [0]
        self.unreliable_sub_idx = [0]

    def update_idx(self, reliable_idx, unreliable_sub_idx):
        """Install freshly computed reliable / unreliable index lists."""
        self.reliable_idx = reliable_idx
        self.unreliable_sub_idx = unreliable_sub_idx

    def __getitem__(self, index):
        idx_r = self.reliable_idx[index]
        idx_u = self.unreliable_sub_idx[index]
        img_r = Image.open(self.root + self.train_data[idx_r]).convert('RGB')
        img_u = Image.open(self.root + self.train_data[idx_u]).convert('RGB')
        if self.transform is not None:
            img_r = self.transform(img_r)
            img_u = self.transform(img_u)
        return img_r, img_u, index, idx_r, idx_u

    def __len__(self):
        return len(self.reliable_idx)
    
# - Boosting Pseudo Labels Functions and Classes

class Trainer(BaseTrainer):
    """
    Trainer class

    Note:
        Inherited from BaseTrainer.
    """
    def __init__(self, model1, model2, model_ema1, model_ema2, train_criterion1, train_criterion2, metrics, optimizer1, optimizer2, config, 
                 data_loader1, data_loader2,
                 valid_data_loader=None,
                 test_data_loader=None,
                 lr_scheduler1=None, lr_scheduler2=None,
                 len_epoch=None, val_criterion=None,
                 model_ema1_copy=None, model_ema2_copy=None):
        """Build the two-network trainer plus the BPL machinery.

        In addition to the base trainer state this prepares, per dataset
        family ('clothing' / 'cifar' / 'webvision' substring of config name):
        - a BPL pair loader per network (reliable/unreliable image pairs),
        - per-sample embedding and logit buffers refreshed every epoch.
        """
        super().__init__(model1, model2, model_ema1, model_ema2, train_criterion1, train_criterion2, 
                         metrics, optimizer1, optimizer2, config, val_criterion, model_ema1_copy, model_ema2_copy)
        self.config = config.config
        self.data_loader1 = data_loader1
        self.data_loader2 = data_loader2
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader1)
        else:
            # iteration-based training
            self.data_loader1 = inf_loop(data_loader1)
            self.data_loader2 = inf_loop(data_loader2)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader

        self.test_data_loader = test_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.do_test = self.test_data_loader is not None
        self.lr_scheduler1 = lr_scheduler1
        self.lr_scheduler2 = lr_scheduler2
        self.log_step = int(np.sqrt(self.data_loader1.batch_size))
        self.train_loss_list: List[float] = []
        self.val_loss_list: List[float] = []
        self.test_loss_list: List[float] = []

        # + bpl dataloader
        # The three dataset families differ only in the pair-dataset class and
        # the embedding dimensionality, so the construction is factored into
        # two local helpers (the original repeated this block three times).
        def _make_bpl_loader(pair_dataset_cls, loader):
            # Wrap `loader`'s dataset in the BPL pair dataset, mirroring its
            # batching configuration.
            return DataLoader(
                dataset=pair_dataset_cls(loader.dataset),
                batch_size=loader.batch_size,
                shuffle=True,
                num_workers=loader.num_workers,
                drop_last=True
            )

        def _alloc_epoch_buffers(emb_dim):
            # Per-sample embedding / logit buffers, one pair per network,
            # overwritten in place every epoch by _train_epoch.
            n1 = len(self.data_loader1.dataset)
            n2 = len(self.data_loader2.dataset)
            n_classes = self.config['num_classes']
            self.embeddings_per_epoch_1 = torch.zeros((n1, emb_dim))
            self.outputs_per_epoch_1 = torch.zeros((n1, n_classes))
            self.embeddings_per_epoch_2 = torch.zeros((n2, emb_dim))
            self.outputs_per_epoch_2 = torch.zeros((n2, n_classes))

        if 'clothing' in self.config['name']:
            self.bpl_data_loader1 = _make_bpl_loader(bpl_dataset_for_clothing1m, self.data_loader1)
            self.bpl_data_loader2 = _make_bpl_loader(bpl_dataset_for_clothing1m, self.data_loader2)
            _alloc_epoch_buffers(2048)  # embedding width for this model — confirm against backbone
        if 'cifar' in self.config['name']:
            self.bpl_data_loader1 = _make_bpl_loader(bpl_dataset, self.data_loader1)
            self.bpl_data_loader2 = _make_bpl_loader(bpl_dataset, self.data_loader2)
            _alloc_epoch_buffers(512)
        if 'webvision' in self.config['name']:
            self.bpl_data_loader1 = _make_bpl_loader(bpl_dataset_for_webvision, self.data_loader1)
            self.bpl_data_loader2 = _make_bpl_loader(bpl_dataset_for_webvision, self.data_loader2)
            _alloc_epoch_buffers(1536)
        # - bpl dataloader

        # save pl output for analysis
        n1 = len(self.data_loader1.dataset)
        n_classes = self.config['num_classes']
        self.outputs_per_epoch_pl_1_1 = torch.zeros((n1, n_classes))
        self.outputs_per_epoch_pl_2_1 = torch.zeros((n1, n_classes))
        # NOTE(review): the *_2 buffers below are also sized from
        # data_loader1's dataset (as in the original); if they are meant to
        # track data_loader2 this is a latent size mismatch — confirm.
        self.outputs_per_epoch_pl_1_2 = torch.zeros((n1, n_classes))
        self.outputs_per_epoch_pl_2_2 = torch.zeros((n1, n_classes))
        

    def _eval_metrics(self, output, target):
        """Apply every configured metric to (output, target) and return the
        per-metric scores as a numpy array."""
        scores = np.zeros(len(self.metrics))
        for slot, metric_fn in enumerate(self.metrics):
            scores[slot] += metric_fn(output, target)
        return scores

    def _train_epoch(self, epoch, model, model_ema, model_ema2, 
                     data_loader, bpl_data_loader, 
                     train_criterion, optimizer, lr_scheduler, 
                     embeddings_per_epoch, outputs_per_epoch,
                     outputs_per_epoch_pl_1, outputs_per_epoch_pl_2,
                     device = 'cpu', knn_device = "cpu", whichnet="-1", queue = None):
        """
        Training logic for one epoch of one network, with optional BPL
        (Boosting Pseudo Labels) refinement active while
        self.pl_start_epoch < epoch <= self.pl_end_epoch.

        :param epoch: Current training epoch.
        :param model: the network trained this epoch
        :param model_ema: EMA copy of `model`, updated after every step
        :param model_ema2: second EMA model used as the teacher for
            pseudo-label targets and label-history updates
        :param data_loader: main training loader, yielding
            (data, target, indexs, _) batches
        :param bpl_data_loader: loader yielding (reliable, unreliable) image
            pairs; its index lists are refreshed here each epoch
        :param train_criterion: loss object; also owns the pred_hist
            pseudo-label history that BPL updates in place
        :param embeddings_per_epoch: (N, d) buffer filled in place with
            per-sample embeddings
        :param outputs_per_epoch: (N, C) buffer filled in place with
            per-sample logits
        :param outputs_per_epoch_pl_1: (N, C) buffer of model_pl outputs on
            the original batch images, filled in place
        :param outputs_per_epoch_pl_2: (N, C) buffer of model_pl softmax
            outputs on unreliable pair images, filled in place
        :param whichnet: tag used only for logging / checkpoint file names
        :param queue: if given, the log dict is put on it instead of returned
        :return: A log that contains all information you want to save.

        Note:
            If you have additional information to record, for example:
                > additional_log = {"x": x, "y": y}
            merge it with log before return. i.e.
                > log = {**log, **additional_log}
                > return log

            The metrics in log must have the key 'metrics'.
        """
        model.train()
        model_ema.train()

        total_loss = 0
        total_metrics = np.zeros(len(self.metrics))
        total_metrics_ema = np.zeros(len(self.metrics))

        # Number of raw examples; the dataset may expose fewer after filtering.
        if hasattr(data_loader.dataset, 'num_raw_example'):
            num_examp = data_loader.dataset.num_raw_example
        else:
            num_examp = len(data_loader.dataset)

        local_step = 0
            
        model.train()

        if epoch > self.pl_start_epoch and epoch <= self.pl_end_epoch: 
            # + copy model
            # model_pl is a disposable deep copy trained with the BPL losses;
            # its optimizer's hyperparameters and state are restored from the
            # main optimizer (the lr=0.02 here is only a constructor
            # placeholder, replaced by load_state_dict).
            model_pl = copy.deepcopy(model)
            model_pl = model_pl.train()
            optimizer_pl = torch.optim.SGD(model_pl.parameters(), lr=0.02)
            optimizer_pl.load_state_dict(optimizer.state_dict())
            # - copy model

            # + compute reliable_idx and unreliable_sub_idx
            # | and update the idx in the dataloader.
            # Per-backend caps on the KNN subset size (memory limits).
            if 'clothing' in self.config['name']:
                if BACKEND == "cuda":
                    max_num = 30000
                if BACKEND == "npu":
                    max_num = 25000
                reliable_idx, unreliable_sub_idx = get_bpl_idx(
                    embeddings_per_epoch, outputs_per_epoch, threshold=self.threshold, knn_device=knn_device, max_num=max_num
                )
            if 'webvision' in self.config['name']:
                if BACKEND == "cuda":
                    max_num = 20000
                if BACKEND == "npu":
                    max_num = 25000
                reliable_idx, unreliable_sub_idx = get_bpl_idx(
                    embeddings_per_epoch, outputs_per_epoch, threshold=self.threshold, knn_device=knn_device, max_num=max_num
                )
            if 'cifar' in self.config['name']:
                reliable_idx, unreliable_sub_idx = get_bpl_idx(
                    embeddings_per_epoch, outputs_per_epoch, knn_device=knn_device
                )
            bpl_data_loader.dataset.update_idx(reliable_idx, unreliable_sub_idx)
            bpl_train_iter = iter(bpl_data_loader)
            # Per-sample predicted labels before/after the pseudo-label update,
            # used only for the accuracy report at the end of the epoch.
            acc_array_before = np.zeros((len(data_loader.dataset), ))
            acc_array_after = np.zeros((len(data_loader.dataset), ))
            # - compute reliable_idx and unreliable_sub_idx

        # + train with bpl
        with tqdm(data_loader, disable=True) as progress:
            for batch_idx, (data, target, indexs, _) in enumerate(progress):
                progress.set_description_str(f'Train epoch {epoch}')

                # + base model training
                data_original = data
                target_original = target

                # one-hot encode the integer labels
                target = torch.zeros(len(target), self.config['num_classes']).scatter_(1, target.view(-1,1), 1)  

                data, target, target_original = data.to(device), target.float().to(device), target_original.to(device)
                
                data, target, mixup_l, mix_index = self._mixup_data(data, target,  alpha = self.config['mixup_alpha'], device = device)
                
                output = model(data)

                # Teacher prediction on the un-mixed batch, used to update the
                # criterion's label history (detached: no gradient through it).
                data_original = data_original.to(device)
                output_original  = model_ema2(data_original)
                output_original = output_original.data.detach()

                train_criterion.update_hist(epoch, output_original, indexs.numpy().tolist(), mix_index = mix_index, mixup_l = mixup_l)
                local_step += 1
                loss, probs = train_criterion(self.global_step + local_step, output, target)                

                optimizer.zero_grad()
                loss.backward() 
                optimizer.step()
                # - base model training

                if epoch > self.pl_start_epoch and epoch <= self.pl_end_epoch:
                    # + bpl model training
                    # ++ compute supervised loss
                    output_pl = model_pl(data)
                    loss_pl_sup, probs = train_criterion(self.global_step + local_step, output_pl, target)
                    # -- compute supervised loss

                    # ++ compute bpl regularization loss
                    # NOTE(review): bare `except` also swallows unrelated
                    # errors from the loader; should be `except StopIteration`.
                    try:
                        data_reli, data_unreli, bpl_index, bpl_index_reli, bpl_index_unreli = next(bpl_train_iter)
                    except:
                        bpl_train_iter = iter(bpl_data_loader)
                        data_reli, data_unreli, bpl_index, bpl_index_reli, bpl_index_unreli = next(bpl_train_iter)
                    # print(bpl_index)
                    data_reli, data_unreli, bpl_index = data_reli.to(device), data_unreli.to(device), bpl_index.to(device)
                    data_reli_list = [data_reli, ]
                    # +++ mean-teacher
                    # Teacher targets for both halves of each pair; no_grad so
                    # the teacher is never trained here.
                    with torch.no_grad():
                        ema_outputs_unreli = model_ema2(data_unreli)
                        ema_outputs_reli_list = list(map(lambda x: model_ema2(x), data_reli_list))
                    # --- mean-teacher
                    
                    # +++ mix up for consistency loss
                    # Mix each reliable image with its nearest unreliable image
                    # and its teacher target, then penalize model_pl's output on
                    # the mix via the soft mixup cross-entropy.
                    partial_nn_mixup_two_targets = functools.partial(nn_mixup_two_targets, x2=data_unreli, y2=ema_outputs_unreli, alpha=1.0, device=self.device, is_bias=False)
                    results_nn_mixup_two_targets = list(map(lambda x, y: partial_nn_mixup_two_targets(x1=x, y1=y), data_reli_list, ema_outputs_reli_list))
                    mixed_outputs_u_list = list(map(lambda x: model_pl(x[0]), results_nn_mixup_two_targets))
                    mix_loss_list = list(map(lambda x, y: mixup_ce_loss_with_softmax(x, y[1], y[2], y[3]), mixed_outputs_u_list, results_nn_mixup_two_targets))
                    loss_pl_reg = sum(mix_loss_list) / len(mix_loss_list)
                    # --- mix up for consistency loss

                    # -- compute bpl regularization loss

                    loss_pl = loss_pl_sup + loss_pl_reg

                    optimizer_pl.zero_grad()
                    loss_pl.backward()
                    optimizer_pl.step()
                    # - bpl model training

                    # + post-processing after training model pl
                    # ++ calculate accuracy before updating
                    preds_before = train_criterion.pred_hist[bpl_index_unreli,:].argmax(dim=1).cpu().numpy()
                    acc_array_before[bpl_index_unreli] = preds_before
                    # -- calculate accuracy before updating
                    with torch.no_grad():
                        outputs_unreli = model_pl(data_unreli)
                        sm_outputs_unreli = F.softmax(outputs_unreli, dim=1).clone().detach()
                        outputs_per_epoch_pl_2[bpl_index_unreli,:] = sm_outputs_unreli.clone().detach().cpu()
                        batch_outputs_pl = model_pl(data_original).clone().detach().cpu()
                        outputs_per_epoch_pl_1[indexs, :] = batch_outputs_pl


                    # EMA update of the stored pseudo-label distribution for the
                    # unreliable samples touched in this batch (momentum self.bpl_mom).
                    train_criterion.pred_hist[bpl_index_unreli,:] = self.bpl_mom * train_criterion.pred_hist[bpl_index_unreli,:] + \
                        (1 - self.bpl_mom) * sm_outputs_unreli
                    
                    # ++ calculate accuracy after updating
                    preds_after = train_criterion.pred_hist[bpl_index_unreli,:].argmax(dim=1).cpu().numpy()
                    acc_array_after[bpl_index_unreli] = preds_after
                    # -- calculate accuracy after updating          
                    # - post-processing after training model_pl


                # + post-processing after training model
                # ++ compute and save embeddings and outputs
                with torch.no_grad():
                    # last_k_layer_forward presumably extracts an intermediate
                    # feature map (semantics of k/lin/lout defined by the
                    # model class — confirm there).
                    batch_embeddings = torch.flatten(model.last_k_layer_forward(
                        data_original, k=-2, lin=0, lout=5), 
                        start_dim=1, end_dim=-1).clone().detach().cpu()
                    batch_outputs = model(data_original).clone().detach().cpu()
                    # L2-normalized embeddings so the later KNN uses cosine-like geometry
                    embeddings_per_epoch[indexs, :] = F.normalize(input=batch_embeddings, dim=1)
                    outputs_per_epoch[indexs, :] = batch_outputs
                    
                # -- compute and save embeddings and outputs

                self.update_ema_variables(model, model_ema, self.global_step + local_step, self.config['ema_alpha'])
                self.train_loss_list.append(loss.item())
                total_loss += loss.item()
                total_metrics += self._eval_metrics(output, target.argmax(dim=1))
                if output_original is not None:
                    total_metrics_ema += self._eval_metrics(output_original, target.argmax(dim=1))


                if batch_idx % self.log_step == 0:
                    progress.set_postfix_str(' {} Loss: {:.6f}'.format(
                        self._progress(batch_idx),
                        loss.item()))

                if batch_idx == self.len_epoch:
                    break

                # - post-processing after training model
        # save_dict = {
        #     "train_labels_gt": data_loader.dataset.train_labels_gt
        # }
        # save_path = "results_output/{}/".format(self.config['name'])
        # if not os.path.exists(save_path):
        #     os.makedirs(save_path)
        # torch.save(save_dict, save_path + "train_labels_gt.pt")
        # exit()
        # - train with bpl
        # Report how the pseudo-label accuracy on the touched unreliable
        # samples changed over the epoch (requires ground-truth labels).
        if epoch > self.pl_start_epoch and epoch <= self.pl_end_epoch:
            unreliable_sub_idx_unique = list(set(unreliable_sub_idx))
            trues = data_loader.dataset.train_labels_gt
            acc_before = (trues[unreliable_sub_idx_unique] == acc_array_before[unreliable_sub_idx_unique]).sum() / len(unreliable_sub_idx_unique)
            acc_after = (trues[unreliable_sub_idx_unique] == acc_array_after[unreliable_sub_idx_unique]).sum() / len(unreliable_sub_idx_unique)
            print("Pseudo Label Acc change from {} to {}".format(acc_before, acc_after))
            # Disabled analysis dump: saves per-epoch outputs/indices to disk
            # and reports model accuracy change on the unreliable subset.
            if False:
                save_dict = {
                    "train_labels": data_loader.dataset.train_labels,
                    "train_labels_gt": trues,
                    "outputs": outputs_per_epoch,
                    "outputs_pl_1": outputs_per_epoch_pl_1,
                    "outputs_pl_2": outputs_per_epoch_pl_2,
                    "unreliable_idx": unreliable_sub_idx,
                    "reliable_idx": reliable_idx,
                    "whichnet": whichnet,
                    "epoch": epoch
                }
                save_path = "results_output/{}/".format(self.config['name'])
                if not os.path.exists(save_path):
                    os.makedirs(save_path)
                torch.save(save_dict, save_path + "net{}_epoch={}.pt".format(whichnet, epoch))
                # 
                _, preds_before = torch.max(outputs_per_epoch.data[unreliable_sub_idx_unique], 1)
                acc_before = (trues[unreliable_sub_idx_unique] == preds_before.numpy()).sum() / len(unreliable_sub_idx_unique)
                _, preds_after = torch.max(outputs_per_epoch_pl_1.data[unreliable_sub_idx_unique], 1)
                acc_after= (trues[unreliable_sub_idx_unique] == preds_after.numpy()).sum() / len(unreliable_sub_idx_unique)
                print("Model Acc change from {} to {}".format(acc_before, acc_after))

        # Some dataset loaders re-shuffle / re-divide themselves via run().
        if hasattr(data_loader, 'run'):
            data_loader.run()


        # NOTE(review): get_last_lr() is called unconditionally here although
        # lr_scheduler is None-checked just below — a None scheduler would
        # raise AttributeError before reaching that check; confirm callers
        # always pass a scheduler.
        log = {
            'global step': self.global_step,
            'local_step': local_step,
            'loss': total_loss / self.len_epoch,
            'metrics': (total_metrics / len(data_loader.dataset)).tolist(),
            'metrics_ema': (total_metrics_ema / len(data_loader.dataset)).tolist(),
            'learning rate': lr_scheduler.get_last_lr()
        }


        if lr_scheduler is not None:
            lr_scheduler.step()

        if queue is None:
            return log
        else:
            queue.put(log)


    def _valid_epoch(self, epoch, model1, model2, device = 'cpu', queue = None):
        """Run one validation pass with the two-network ensemble.

        The two networks' outputs are averaged, loss and metrics are
        accumulated over the whole validation loader, and the result dict
        (keys 'val_loss' and 'val_metrics') is returned directly or pushed
        onto `queue` when one is supplied.
        """
        model1.eval()
        model2.eval()

        loss_sum = 0
        metric_sums = np.zeros(len(self.metrics))
        with torch.no_grad():
            with tqdm(self.valid_data_loader, disable=True) as progress:
                for batch_idx, (data, _, _, target) in enumerate(progress):
                    progress.set_description_str(f'Valid epoch {epoch}')
                    data, target = data.to(device), target.to(device)

                    # ensemble: average the two networks' outputs
                    output = 0.5 * (model1(data) + model2(data))
                    loss = self.val_criterion(output, target)

                    self.val_loss_list.append(loss.item())
                    loss_sum += loss.item()
                    metric_sums += self._eval_metrics(output, target)

        result = {
            'val_loss': loss_sum / len(self.valid_data_loader),
            'val_metrics': (metric_sums / len(self.valid_data_loader.dataset)).tolist()
        }
        if queue is None:
            return result
        queue.put(result)

    def _test_epoch(self, epoch, model1, model2, device = 'cpu', queue = None):
        """Run one test pass with the two-network ensemble.

        Mirrors _valid_epoch but iterates the test loader and reports under
        the keys 'test_loss' and 'test_metrics'; the result dict is returned
        directly or pushed onto `queue` when one is supplied.
        """
        model1.eval()
        model2.eval()

        loss_sum = 0
        metric_sums = np.zeros(len(self.metrics))
        with torch.no_grad():
            with tqdm(self.test_data_loader, disable=True) as progress:
                for batch_idx, (data, _, indexs, target) in enumerate(progress):
                    progress.set_description_str(f'Test epoch {epoch}')
                    data, target = data.to(device), target.to(device)

                    # ensemble: average the two networks' outputs
                    output = 0.5 * (model1(data) + model2(data))
                    loss = self.val_criterion(output, target)

                    self.test_loss_list.append(loss.item())
                    loss_sum += loss.item()
                    metric_sums += self._eval_metrics(output, target)

        result = {
            'test_loss': loss_sum / len(self.test_data_loader),
            'test_metrics': (metric_sums / len(self.test_data_loader.dataset)).tolist()
        }
        if queue is None:
            return result
        queue.put(result)


    def _warmup_epoch(self, epoch, model, data_loader, optimizer, train_criterion, lr_scheduler, device = 'cpu', queue = None):
        """Run one plain supervised warm-up epoch (standard cross-entropy).

        Also feeds detached predictions into ``train_criterion.update_hist``
        so the criterion can accumulate per-sample prediction history.
        Returns the epoch log dict, or puts it on *queue* when given.
        """
        running_loss = 0
        running_metrics = np.zeros(len(self.metrics))
        model.train()

        with tqdm(data_loader) as progress:
            for step, (data, target, indexs , _) in enumerate(progress):
                progress.set_description_str(f'Train epoch {epoch}')

                data = data.to(device)
                target = target.long().to(device)

                optimizer.zero_grad()
                output = model(data)
                # Detached copy: history tracking must not receive gradients.
                train_criterion.update_hist(epoch, output.data.detach(), indexs.cpu().detach().numpy().tolist())

                loss = F.cross_entropy(output, target)
                loss.backward()
                optimizer.step()

                self.train_loss_list.append(loss.item())
                running_loss += loss.item()
                running_metrics += self._eval_metrics(output, target)

                if step % self.log_step == 0:
                    progress.set_postfix_str(' {} Loss: {:.6f}'.format(
                        self._progress(step),
                        loss.item()))

                if step == self.len_epoch:
                    break

        log = {
            'loss': running_loss / self.len_epoch,
            'noise detection rate' : 0.0,
            'metrics': (running_metrics / self.len_epoch).tolist(),
            'learning rate': lr_scheduler.get_lr()
        }
        if queue is None:
            return log
        queue.put(log)

    # def eval_train(self, epoch, model_ema2, train_criterion):
    #     #model.eval()
    #     num_samples = args.num_batches*args.batch_size
    #     losses = torch.zeros(num_samples)
    #     with torch.no_grad():
    #         for batch_idx, (inputs, targets, path) in enumerate(eval_loader):
    #             inputs, targets = inputs.cuda(), targets.cuda()  
    #             output0  = model_ema2(inputs)
    #             output0 = output0.data.detach()
    #             output1, output2, output3 = None, None, None
    #             train_criterion.update_hist(epoch, output0, output1, output2, output3, indexs.numpy().tolist(),mix_index = mix_index, mixup_l = mixup_l)


    def update_ema_variables(self, model, model_ema, global_step, alpha_=0.997):
        """Update ``model_ema``'s parameters as an exponential moving average of ``model``'s.

        ema = alpha * ema + (1 - alpha) * param, where alpha is either
        ramped up via ``sigmoid_rampup`` (when ``self.config['ema_update']``
        is truthy) or capped by the step-dependent true-average schedule.

        :param model: source network whose parameters are tracked
        :param model_ema: EMA network, updated in place
        :param global_step: 0-based global training step
        :param alpha_: target EMA decay; 0 means "copy parameters directly"
        """
        if alpha_ == 0:
            # BUGFIX: original referenced undefined `ema_param`/`param` here
            # (NameError on this path). Copy every parameter directly.
            for ema_param, param in zip(model_ema.parameters(), model.parameters()):
                ema_param.data.copy_(param.data)
        else:
            if self.config['ema_update']:
                alpha = sigmoid_rampup(global_step + 1, self.config['ema_step'])*alpha_
            else:
                # Use the true average until the exponential average is more correct.
                alpha = min(1 - 1 / (global_step + 1), alpha_)
            for ema_param, param in zip(model_ema.parameters(), model.parameters()):
                ema_param.data.mul_(alpha).add_(param.data, alpha = 1 - alpha)


    def _progress(self, batch_idx):
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader1, 'n_samples'):
            current = batch_idx * self.data_loader1.batch_size
            total = self.data_loader1.n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)

    def _mixup_data(self, x, y, alpha=1.0,  device = 'cpu'):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
            lam = max(lam, 1-lam)
            batch_size = x.size()[0]
            mix_index = torch.randperm(batch_size).to(device)

            mixed_x = lam * x + (1 - lam) * x[mix_index, :]#
            mixed_target = lam * y + (1 - lam) * y[mix_index, :]


            return mixed_x, mixed_target, lam, mix_index
        else:
            lam = 1
            return x, y, lam, ...


    def _mixup_criterion(self, pred, y_a, y_b, lam, *args):
        loss_a, prob_a, entropy_a= self.train_criterion(pred, y_a, *args)
        loss_b, porb_b, entropy_b = self.train_criterion(pred, y_b, *args)
        return lam * loss_a + (1 - lam) * loss_b, lam * prob_a + (1-lam) * porb_b, lam * entropy_a + (1-lam) * entropy_b
