import math
import os
import pprint
import random
from functools import partial
from re import L
from turtle import distance

import numpy as np
import scipy
import scipy.stats
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from cv2 import getPerspectiveTransform, solve
from torch.nn.modules.loss import TripletMarginWithDistanceLoss
from torch.utils.tensorboard import writer
from torchvision.transforms.functional import rotate

import Models.models.global_writer as global_writer
from Models.models.semi_loss import linear_rampup


def save_list_to_txt(name, input_list):
    """Append each item of *input_list* to the text file *name*, one per line.

    Args:
        name(str): path of the output file (opened in append mode).
        input_list(Iterable[str]): lines to write; a newline is added to each.
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open(name, mode='a') as f:
        for item in input_list:
            f.write(item + '\n')


def set_gpu(args):
    """Restrict CUDA to the GPUs listed in ``args.gpu`` and return their count.

    Args:
        args: namespace with a ``gpu`` attribute, a comma-separated id
            string such as ``"0,1"``.

    Returns:
        int: number of GPUs selected.
    """
    gpu_list = [int(x) for x in args.gpu.split(',')]
    print('use gpu:', gpu_list)
    # PCI_BUS_ID makes CUDA device ids match the nvidia-smi ordering.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return len(gpu_list)


def ensure_path(path):
    """Create directory *path* (including parents) if it does not exist."""
    if not os.path.exists(path):
        print('create folder:', path)
    # exist_ok avoids the race between the existence check and the creation.
    os.makedirs(path, exist_ok=True)
    print("save path: ", path)


class Averager():
    """Incrementally tracks the running mean of a stream of values."""

    def __init__(self):
        self.n = 0  # number of values seen so far
        self.v = 0  # current running mean

    def add(self, x):
        """Fold one new value into the running mean."""
        total = self.v * self.n + x
        self.n += 1
        self.v = total / self.n

    def item(self):
        """Return the current mean (0 before any value is added)."""
        return self.v


def count_acc(logits, label):
    """Return the classification accuracy of *logits* against integer labels."""
    pred = torch.argmax(logits, dim=1)
    correct = (pred == label)
    # Keep the reduction on the device family the runtime supports.
    if torch.cuda.is_available():
        return correct.type(torch.cuda.FloatTensor).mean().item()
    return correct.type(torch.FloatTensor).mean().item()


# Shared pretty-printer instance backing the pprint() helper below.
_utils_pp = pprint.PrettyPrinter()


def pprint(x):
    """Pretty-print *x* (deliberately shadows the stdlib module name)."""
    _utils_pp.pprint(x)


def compute_confidence_interval(data):
    """
    Compute 95% confidence interval
    :param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
    :return: the 95% confidence interval for this data.
    """
    arr = 1.0 * np.asarray(data)
    mean = arr.mean()
    # 1.96 is the z-value of the two-sided 95% normal interval.
    half_width = 1.96 * arr.std() / np.sqrt(arr.size)
    return mean, half_width

def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval of *data*.

    Args:
        data: sequence of sample values.
        confidence(float): confidence level, default 0.95.
    """
    import scipy.stats  # a bare top-level `import scipy` does not expose .stats
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    # Public t.ppf replaces the private t._ppf, which newer SciPy removed.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, h


def load_model(model, dir, print_mode=True):
    """Load checkpoint weights from *dir* into *model* (in place) and return it.

    Handles three checkpoint flavours: parallel meta-trained ('module.'
    prefix), plain meta-trained ('encoder.' prefix) and pre-trained backbones
    (no prefix, which gets 'encoder.' prepended).  Keys absent from the
    current model are dropped, so all other parameters keep their values.
    """
    parallel = isinstance(model, torch.nn.parallel.DataParallel)
    target = model.module if parallel else model
    model_dict = target.state_dict()
    if print_mode:
        print('loading model from :', dir)
    pretrained_dict = torch.load(dir)['params']
    first_key = list(pretrained_dict.keys())[0]
    if 'encoder' in first_key:
        # Meta-trained checkpoint; strip the DataParallel 'module.' prefix.
        if 'module' in first_key:
            pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items()}
        else:
            pretrained_dict = dict(pretrained_dict.items())
    else:
        # Pre-trained backbone: prefix every key so it targets the encoder.
        pretrained_dict = {'encoder.' + k: v
                           for k, v in pretrained_dict.items()}
    # Keep only keys the current model actually has; the rest stay untouched.
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict}
    model_dict.update(pretrained_dict)
    target.load_state_dict(model_dict, strict=True)  # strict defaults to True

    return model


def set_seed(seed):
    """Seed every RNG in use; ``seed == 0`` means fully random (benchmark mode)."""
    if seed == 0:
        print(' random seed')
        torch.backends.cudnn.benchmark = True
        return
    print('manual seed:', seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels make runs reproducible at some speed cost.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def detect_grad_nan(model):
    """Zero out any parameter gradient that contains NaN values.

    Parameters whose ``grad`` is ``None`` (frozen, or not used in the last
    backward pass) are skipped instead of raising an AttributeError.
    """
    for param in model.parameters():
        if param.grad is None:
            continue
        if torch.isnan(param.grad).any():  # NaN != NaN trick, made explicit
            param.grad.zero_()


def print_model_params(model, params):
    """Log total and trainable parameter counts of *model* in green ANSI text.

    *params* is the run configuration; only its ``model`` attribute (the
    backbone name) is used for the log line.
    """
    n_params = sum(p.numel() for p in model.parameters())
    n_buffers = sum(b.numel() for b in model.buffers())
    print("\033[1;32;m{}\033[0m model \033[1;32;m{}\033[0m backbone have \033[1;32;m{}\033[0m parameters.".format(
        model.__class__.__name__, params.model, n_params + n_buffers))
    n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("\033[1;32;m{}\033[0m model \033[1;32;m{}\033[0m backbone have \033[1;32;m{}\033[0m training parameters.".format(
        model.__class__.__name__, params.model, n_trainable))

def get_embedding(model, input, device):
    """Encode *input* through the wrapped encoder and return (N, C) numpy features.

    Batches larger than 64 samples are encoded chunk by chunk to bound memory.
    Encoder outputs shaped [n, C, 1, 1] have the trailing singleton spatial
    dims squeezed away before being moved to the CPU.
    """
    chunk = 64
    n = input.shape[0]
    if n > chunk:
        pieces = []
        for start in range(0, n, chunk):
            feat = model.module.encode(input[start:start + chunk].to(device), dense=False)
            # Collapse [n, C, 1, 1] -> [n, C].
            if feat.dim() > 2:
                feat = feat.squeeze(-1).squeeze(-1)
            pieces.append(feat.detach().cpu())
        embed = torch.cat(pieces)
    else:
        feat = model.module.encode(input.to(device), dense=False)
        if feat.dim() > 2:
            feat = feat.squeeze(-1).squeeze(-1)
        embed = feat.detach().cpu()
    assert embed.shape[0] == n
    return embed.numpy()

def shuffle_batch(x, groups):
    '''
        Group-wise batch shuffle: [N,C,H,W] -> [groups, N/groups, C,H,W]
        -> [N/groups, groups, C,H,W] -> [N,C,H,W].

        Same trick as channel shuffle, applied to the batch dimension: the
        batch is reshaped into *groups* chunks, the two leading axes are
        transposed, and the result is flattened back, interleaving samples
        from the different groups.
    '''
    n, c, h, w = x.size()
    regrouped = x.view(int(groups), int(n / groups), c, h, w).permute(1, 0, 2, 3, 4)
    # permute leaves the storage non-contiguous, so contiguous() is required
    # before the final view.
    return regrouped.contiguous().view(n, c, h, w)

def label2onehot(label, num_class):
    """Convert an integer label vector to a float one-hot matrix.

    Args:
        label(Tensor): shape [N] integer class ids.
        num_class(int): number of classes (width of the result).

    Returns:
        Tensor: float tensor of shape [N, num_class].
    """
    result = torch.zeros((label.shape[0], num_class))
    # Vectorized advanced indexing replaces the per-sample Python loop.
    result[torch.arange(label.shape[0]), label] = 1.0
    return result

def train_exft_one_iter(iter_num, model, SFC, data_support, unlabel_inputs, support_targets, T1, T2, num_gpu, af=0.3, semi_l2_loss=False, temperature=1):
    """One exft iteration: supervised CE plus a ramped unlabeled loss.

    The unlabeled weight alpha ramps linearly from 0 to *af* between
    iterations T1 and T2, then stays at *af*.

    Args:
        SFC(Tensor): classifier weights replicated across the GPUs.
        semi_l2_loss(bool): if True use a sharpened-consistency MSE loss,
            otherwise argmax pseudo-label cross entropy.

    Returns:
        Tensor: the combined scalar loss.
    """
    bs = 64  # per-forward chunk (2 GPUs); avoids overly large batches
    alpha = 0.0
    if iter_num > T1:
        alpha = af if iter_num > T2 else (iter_num - T1) / (T2 - T1) * af

    sfc_batch = SFC.unsqueeze(0).repeat(num_gpu, 1, 1, 1, 1)
    logits_labeld = model((sfc_batch, data_support))
    loss1 = F.cross_entropy(logits_labeld, support_targets)

    if af > 0:
        logits_unlabel = get_logits(model, SFC, unlabel_inputs, num_gpu, bs=bs)
        if semi_l2_loss:
            # Consistency loss against a sharpened copy of the prediction.
            prob = torch.softmax(logits_unlabel, 1)
            loss2 = F.mse_loss(prob, sharpen(prob, temperature).detach())
        else:
            # Self-training on the model's own argmax pseudo-labels.
            _, pseudo_y = logits_unlabel.max(1)
            loss2 = F.cross_entropy(logits_unlabel, pseudo_y)
    else:
        loss2 = 0

    return loss1 + alpha * loss2

def aug_finetune(iters, labeled_trainloader, model, optimizer, num_gpu, unlabeled_trainloader=None, fixmatch=False, threshold=0.8, fixmatch_mu=1, temperature=1):
    """Fine-tune on augmented labeled data, optionally with a FixMatch term.

    Args:
        iters(int): number of optimization iterations to run.
        labeled_trainloader(DataLoader): labeled batches (inputs, targets).
        unlabeled_trainloader(DataLoader): yields ((weak, strong), _) pairs;
            required when *fixmatch* is True.
        fixmatch(bool): enable the FixMatch pseudo-label loss.
        threshold(float): confidence threshold for keeping pseudo-labels.
        fixmatch_mu(float): weight of the unlabeled loss.
        temperature(float): softmax temperature for pseudo-label sharpening.
    """
    bs = labeled_trainloader.batch_size
    # Two DataLoaders must advance in lockstep, so batches are drawn
    # manually; exhausted iterators are rebuilt on StopIteration.
    # (next(it) replaces the Python-2-only it.next(), which raised
    # AttributeError on Python 3; the unlabeled iterator is now created up
    # front instead of relying on a bare except to mask a NameError.)
    labeled_train_iter = iter(labeled_trainloader)
    if fixmatch:
        unlabeled_train_iter = iter(unlabeled_trainloader)
    for batch_idx in range(iters):
        global_writer.global_count += 1
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)
        if fixmatch:
            try:
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_train_iter)
            except StopIteration:
                unlabeled_train_iter = iter(unlabeled_trainloader)
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_train_iter)
            inputx_u = torch.cat((inputs_u_w, inputs_u_s))
            logits_u = get_logits_for_origin_data(model, inputx_u, num_gpu, bs=64)
            logits_u_w, logtis_u_s = logits_u.chunk(2)
            # Pseudo-labels from the weakly augmented view; the strong view
            # is trained toward them, masked by prediction confidence.
            pseudo_label = torch.softmax(logits_u_w.detach() / temperature, dim=-1)
            max_probs, targets_u = torch.max(pseudo_label, dim=-1)
            mask = max_probs.ge(threshold).float()
            Lu = (F.cross_entropy(logtis_u_s, targets_u, reduction="none") * mask).mean()
            global_writer.writer.add_scalar('data/mask_rate', float(mask.sum() / len(mask)), global_writer.global_count)
            global_writer.writer.add_scalar('data/unlabel loss', float(fixmatch_mu * Lu), global_writer.global_count)

        inputs_x = inputs_x.cuda()
        logits_x = get_logits_for_origin_data(model, inputs_x, num_gpu, bs=64)
        Lx = F.cross_entropy(logits_x, targets_x)
        if fixmatch:
            loss = Lx + fixmatch_mu * Lu
        else:
            loss = Lx
        global_writer.writer.add_scalar('data/label loss', float(loss), global_writer.global_count)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def train_exft_aug_one_epoch(epoch, iter_num_per_epoch, labeled_trainloader, unlabeled_trainloader, model, optimizer, T1, T2, num_gpu, af=0.3, semi_l2_loss=False, temperature=1):
    """
        Train one epoch with exft (supervised CE + ramped unlabeled loss).

        Args:
            iter_num_per_epoch(int): iterations to run in this epoch
            labeled_trainloader(DataLoader): labeled-sample DataLoader
            unlabeled_trainloader(DataLoader): unlabeled-sample DataLoader
            model(Model): the model
            optimizer(Optim): the optimizer
    """
    bs = labeled_trainloader.batch_size
    labeled_train_iter = iter(labeled_trainloader)
    unlabeled_train_iter = iter(unlabeled_trainloader)
    for batch_idx in range(iter_num_per_epoch):
        # Two DataLoaders must stay in sync, so batches are fetched manually;
        # exhausted iterators raise StopIteration and are rebuilt.  next(it)
        # replaces the Python-2-only it.next(), which does not exist on
        # Python 3 iterators.
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)

        try:
            inputs_u, _ = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(unlabeled_trainloader)
            inputs_u, _ = next(unlabeled_train_iter)
        global_writer.global_count += 1
        inputs_x = inputs_x.cuda()
        inputs_u = inputs_u.cuda()
        if model.training:
            # Forward the unlabeled batch a second time with BN statistics
            # frozen, so BN running stats only see the mixed batch once.
            inputs_1 = torch.cat((inputs_x, inputs_u))
            logits_1 = get_logits_for_origin_data(model, inputs_1, num_gpu, bs=64)
            model.apply(freeze_bn)
            logits_2 = get_logits_for_origin_data(model, inputs_u, num_gpu, bs=64)
            model.apply(activate_bn)
            logits = torch.cat((logits_1, logits_2))
        else:
            inputs = torch.cat((inputs_x, inputs_u))
            logits = get_logits_for_origin_data(model, inputs, num_gpu, bs=64)
        logits_x = logits[:bs]
        logits_u = logits[bs:]
        loss1 = F.cross_entropy(logits_x, targets_x)

        # Unlabeled weight alpha ramps linearly from 0 to af over [T1, T2].
        alpha = 0.0
        iter_num = batch_idx + (epoch-1) * iter_num_per_epoch
        if iter_num > T1:
            alpha = (iter_num - T1) / (T2 - T1) * af
            if iter_num > T2:
                alpha = af
        if af > 0:
            if semi_l2_loss:
                unlabel_prob_distrib = torch.softmax(logits_u, 1)
                sharpen_prob = sharpen(unlabel_prob_distrib, temperature).detach()
                loss2 = F.mse_loss(unlabel_prob_distrib, sharpen_prob)
            else:
                values, pseudo_y = logits_u.max(1)
                loss2 = F.cross_entropy(logits_u, pseudo_y)
        else:
            loss2 = 0
        loss = loss1 + alpha * loss2
        global_writer.writer.add_scalar('data/label loss', float(loss1), global_writer.global_count)
        global_writer.writer.add_scalar('data/unlabel_loss', float(alpha * loss2), global_writer.global_count)
        global_writer.writer.add_scalar('data/total loss', float(loss), global_writer.global_count)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def train_mixmatch_one_epoch(epoch, iter_num_per_epoch, num_class, labeled_trainloader, unlabeled_trainloader, model, criterion, optimizer,alpha, temperature, num_gpu, cgbn, remove_mixup=False):
    """
        Train one epoch with mixmatch.

        Args:
            epoch(int): current epoch number
            iter_num_per_epoch(int): iterations to run per epoch
            labeled_trainloader(DataLoader): labeled-sample DataLoader
            unlabeled_trainloader(DataLoader): unlabeled-sample DataLoader
            model(Model): the model
            criterion(): loss function for semi settings
            optimizer(Optim): the optimizer
            temperature(int): temperature used when sharpening
            alpha(float): parameter of the beta distribution
            num_class(int): number of classes, used by label2onehot
    """
    bs = labeled_trainloader.batch_size
    labeled_train_iter = iter(labeled_trainloader)
    unlabeled_train_iter = iter(unlabeled_trainloader)
    for batch_idx in range(iter_num_per_epoch):
        # Two DataLoaders must stay in sync, so batches are fetched manually;
        # exhausted iterators raise StopIteration and are rebuilt.  next(it)
        # replaces the Python-2-only it.next().
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)

        try:
            (inputs_u, inputs_u2), _ = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(unlabeled_trainloader)
            (inputs_u, inputs_u2), _ = next(unlabeled_train_iter)

        # Count every batch (was incremented once per epoch, which made every
        # add_scalar call of the epoch overwrite the same global step).
        global_writer.global_count += 1
        inputs_x = inputs_x.cuda()
        inputs_u, inputs_u2 = inputs_u.cuda(), inputs_u2.cuda()
        targets_x = label2onehot(targets_x, num_class).cuda()

        with torch.no_grad():
            if cgbn:
                model.apply(freeze_bn)
            # Guessed label: average prediction over both augmentations,
            # sharpened by the temperature.
            outputs_u = get_logits_for_origin_data(model, inputs_u, num_gpu, bs=64)
            outputs_u2 = get_logits_for_origin_data(model, inputs_u2, num_gpu, bs=64)
            pred = (torch.softmax(outputs_u, dim=1) + torch.softmax(outputs_u2, dim=1)) / 2
            pred_temp = pred ** ( 1 / temperature)
            targets_u = pred_temp / pred_temp.sum(1, keepdim=True)
            targets_u = targets_u.detach()
            if cgbn:
                model.apply(activate_bn)

        all_inputs = torch.cat([inputs_x, inputs_u, inputs_u2], dim=0)
        all_targets = torch.cat([targets_x, targets_u, targets_u], dim=0)
        if remove_mixup:
            no_mixed_input = list(torch.split(all_inputs, bs))
            no_mixed_input = interleave(no_mixed_input, bs)
            logits = [get_logits_for_origin_data(model, no_mixed_input[0], num_gpu, bs=64)]
            for input in no_mixed_input[1:]:
                logits.append(get_logits_for_origin_data(model, input, num_gpu, bs=64))
            logits = interleave(logits, bs)
            final_target = all_targets
        else:
        # mixup between the concatenated labeled/unlabeled batches
            l = np.random.beta(alpha, alpha)
            l = max(l, 1-l)

            idx = torch.randperm(all_inputs.size(0))
            input_a, input_b = all_inputs, all_inputs[idx]
            target_a, target_b = all_targets, all_targets[idx]

            mixed_input = l * input_a + (1 - l) * input_b
            mixed_target = l * target_a + (1 - l) * target_b

            mixed_input = list(torch.split(mixed_input, bs))
            mixed_input = interleave(mixed_input, bs)
            # NOTE: `bs` inside get_logits_for_origin_data is a forward-pass
            # chunk size, unrelated to the loader batch size used here.
            logits = [get_logits_for_origin_data(model, mixed_input[0], num_gpu, bs=64)]
            for input in mixed_input[1:]:
                logits.append(get_logits_for_origin_data(model, input, num_gpu, bs=64))
            logits = interleave(logits, bs)
            final_target = mixed_target

        logits_x = logits[0]
        logits_u = torch.cat(logits[1:], dim=0)

        Lx, Lu, w = criterion(logits_x, final_target[:bs], logits_u, final_target[bs:], epoch+batch_idx / iter_num_per_epoch)
        loss =  Lx + w * Lu
        global_writer.writer.add_scalar('data/unlabel_loss', float(w * Lu), global_writer.global_count)
        global_writer.writer.add_scalar('data/w', float(w), global_writer.global_count)
        global_writer.writer.add_scalar('data/loss', float(loss), global_writer.global_count)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def train_fixmatch_one_epoch(args, epoch, iter_num_per_epoch, labeled_trainloader, unlabeled_trainloader, model, ema_model, optimizer, threshold, lambda_u, num_gpu, temperature=1):
    """
        Train one epoch with fixmatch.

        Args:
            iter_num_per_epoch(int): iterations to run per epoch
            labeled_trainloader(DataLoader): labeled-sample DataLoader
            unlabeled_trainloader(DataLoader): unlabeled-sample DataLoader
            model(Model): the model
            ema_model(ModelEMA): EMA model
            optimizer(Optim): the optimizer
            threshold(float): confidence threshold for the unlabeled loss
            temperature(int): pseudo-label temperature; 1 means no scaling
            lambda_u(float): unlabeled loss weight
    """
    bs = labeled_trainloader.batch_size
    labeled_train_iter = iter(labeled_trainloader)
    unlabeled_train_iter = iter(unlabeled_trainloader)
    for batch_idx in range(iter_num_per_epoch):
        # Two DataLoaders must stay in sync, so batches are fetched manually;
        # exhausted iterators raise StopIteration and are rebuilt.  next(it)
        # replaces the Python-2-only it.next().
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)

        try:
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(unlabeled_trainloader)
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_train_iter)

        global_writer.global_count += 1
        inputs_x = inputs_x.cuda()
        inputs_u_w, inputs_u_s = inputs_u_w.cuda(), inputs_u_s.cuda()
        if model.training:
            # The strongly augmented batch is forwarded with frozen BN stats.
            inputs_1 = torch.cat((inputs_x, inputs_u_w))
            logits_1 = get_logits_for_origin_data(model, inputs_1, num_gpu, bs=64)
            model.apply(freeze_bn)
            logits_2 = get_logits_for_origin_data(model, inputs_u_s, num_gpu, bs=64)
            model.apply(activate_bn)
            logits = torch.cat((logits_1, logits_2))
        else:
            inputs = torch.cat((inputs_x, inputs_u_w, inputs_u_s))
            logits = get_logits_for_origin_data(model, inputs, num_gpu, bs=64)
        logits_x = logits[:bs]
        logits_u_w, logtis_u_s = logits[bs:].chunk(2)
        del logits

        # Pseudo-labels come from the weakly augmented view only.
        pseudo_label = torch.softmax(logits_u_w.detach() / temperature, dim=-1)
        max_probs, targets_u = torch.max(pseudo_label, dim=-1)
        if args.fixmatch_linear_weight:
            Lx = F.cross_entropy(logits_x, targets_x)
            Lu = F.cross_entropy(logtis_u_s, targets_u)
            w = linear_rampup(epoch+batch_idx / iter_num_per_epoch, args.mse)
            loss = Lx + w * Lu * lambda_u
            global_writer.writer.add_scalar('data/unlabel_loss', float(w * Lu), global_writer.global_count)
            global_writer.writer.add_scalar('data/w', float(w * lambda_u), global_writer.global_count)
            global_writer.writer.add_scalar('data/loss', float(loss), global_writer.global_count)
        else:
            mask = max_probs.ge(threshold).float()
            Lx = F.cross_entropy(logits_x, targets_x)
            Lu = (F.cross_entropy(logtis_u_s, targets_u, reduction="none") * mask).mean()
            loss = Lx + lambda_u * Lu
            global_writer.writer.add_scalar('data/unlabel_loss', float(lambda_u * Lu), global_writer.global_count)
            global_writer.writer.add_scalar('data/mask_rate', float(mask.sum() / len(mask)), global_writer.global_count)
            global_writer.writer.add_scalar('data/loss', float(loss), global_writer.global_count)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if ema_model is not None:
            ema_model.update(model)

def train_rotate_one_epoch(epoch, iter_num_per_epoch, labeled_trainloader, rotate_trainloader, model, ema_model, optimizer, rotw, num_gpu):
    """
        Train one epoch with a rotation self-supervision auxiliary loss.

        Args:
            iter_num_per_epoch(int): iterations to run per epoch
            labeled_trainloader(DataLoader): labeled-sample DataLoader
            rotate_trainloader(DataLoader): rotated unlabeled-sample DataLoader
            model(Model): the model
            ema_model(ModelEMA): EMA model
            optimizer(Optim): the optimizer
            rotw(float): rotation loss weight
    """
    bs = labeled_trainloader.batch_size
    labeled_train_iter = iter(labeled_trainloader)
    rotate_train_iter = iter(rotate_trainloader)
    for batch_idx in range(iter_num_per_epoch):
        # Two DataLoaders must stay in sync, so batches are fetched manually;
        # exhausted iterators raise StopIteration and are rebuilt.  next(it)
        # replaces the Python-2-only it.next().
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)

        try:
            rotate_imgs, rotate_labels = next(rotate_train_iter)
        except StopIteration:
            rotate_train_iter = iter(rotate_trainloader)
            rotate_imgs, rotate_labels = next(rotate_train_iter)

        global_writer.global_count += 1
        inputs_x = inputs_x.cuda()
        rotate_labels = rotate_labels.cuda()
        rotate_imgs = rotate_imgs.cuda()
        # Flatten the per-sample rotation group [B, R, C, H, W] -> [B*R, C, H, W].
        C,H,W =rotate_imgs.size()[-3:]
        rotate_imgs = rotate_imgs.reshape(-1, C, H, W)
        rotate_labels = rotate_labels.flatten()

        try:
            logits_x = get_logits_for_origin_data(model, inputs_x, num_gpu, bs=64)
        except Exception:
            # Deliberate debugging trap for rare forward failures.
            import pdb
            print("unexpect error in training, enter pdb mode")
            pdb.set_trace()

        Lx = F.cross_entropy(logits_x, targets_x)

        rotation_features = get_features_for_origin_data(model, rotate_imgs)
        rotation_features = torch.squeeze(rotation_features)
        # Warm-up: don't backprop the rotation loss into the encoder early on.
        if epoch <= 3:
            rotation_features = rotation_features.detach()
        scores = model.module.rotation_classifer(rotation_features)
        rotate_loss = F.cross_entropy(scores, rotate_labels)
        loss = Lx + rotw * rotate_loss

        global_writer.writer.add_scalar('data/rotate_loss', float(rotw * rotate_loss), global_writer.global_count)
        global_writer.writer.add_scalar('data/loss', float(loss), global_writer.global_count)
        global_writer.writer.add_scalar('data/labelled_loss', float(Lx), global_writer.global_count)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def train_triplet_one_epoch(epoch, iter_num_per_epoch, labeled_trainloader, triplet_trainloader, model, ema_model, optimizer, tripletw, margin, trisolver, num_gpu):
    """
        Train one epoch with an auxiliary triplet loss.

        Args:
            iter_num_per_epoch(int): iterations to run per epoch
            labeled_trainloader(DataLoader): labeled-sample DataLoader
            triplet_trainloader(DataLoader): triplet-sample DataLoader
            model(Model): the model
            ema_model(ModelEMA): EMA model
            optimizer(Optim): the optimizer
            tripletw(float): triplet loss weight
            margin(float): triplet margin
            trisolver: solver passed to the batched EMD distance function
    """
    bs = labeled_trainloader.batch_size
    labeled_train_iter = iter(labeled_trainloader)
    triplet_bs = triplet_trainloader.batch_size
    triplet_train_iter = iter(triplet_trainloader)
    for batch_idx in range(iter_num_per_epoch):
        # Two DataLoaders must stay in sync, so batches are fetched manually;
        # exhausted iterators raise StopIteration and are rebuilt.  next(it)
        # replaces the Python-2-only it.next().
        try:
            inputs_x, targets_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x = next(labeled_train_iter)

        try:
            anchor, positive, negative = next(triplet_train_iter)
        except StopIteration:
            triplet_train_iter = iter(triplet_trainloader)
            anchor, positive, negative = next(triplet_train_iter)

        global_writer.global_count += 1
        inputs_x = inputs_x.cuda()
        logits_x = get_logits_for_origin_data(model, inputs_x, num_gpu, bs=64)
        Lx = F.cross_entropy(logits_x, targets_x)
        # Batched EMD distance (with the chosen solver) as the triplet metric.
        distance_function = partial(model.module.compute_emd_distance_for_batch, solver=trisolver)
        Tripletloss = nn.TripletMarginWithDistanceLoss(distance_function=distance_function, margin=margin)
        triplet_input = torch.cat((anchor, positive, negative))
        triplet_tensor = get_features_for_origin_data(model, triplet_input, dense=True)
        anchor_tensor, positive_tensor, negative_tensor = triplet_tensor.split(triplet_bs)
        triplet_loss = Tripletloss(anchor_tensor, positive_tensor, negative_tensor)
        loss = Lx + triplet_loss * tripletw

        global_writer.writer.add_scalar('data/triplet_loss', float(triplet_loss * tripletw), global_writer.global_count)
        global_writer.writer.add_scalar('data/loss', float(loss), global_writer.global_count)
        global_writer.writer.add_scalar('data/labelled_loss', float(Lx), global_writer.global_count)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def get_features_for_origin_data(model, data_origin, dense=False):
    """
        Map raw image tensors to features via the wrapped encoder.

        dense: when True, return local (spatial) features instead of pooled
        global ones.
    """
    return model.module.encode(data_origin, dense=dense)

def get_logits_for_origin_data(model, data_origin, num_gpu, bs=64):
    """
        Encode raw image tensors, then classify them through the SFC head.

        Mainly used by the mixmatch path; unlike the other get_logits calls,
        which receive feature tensors, this one receives the raw image tensor.

        Args:
            model(Model): model holding an initialized SFC layer
            data_origin(Tensor): input image tensor
    """
    # Temporarily switch the wrapped model into encoder mode for the forward.
    model.module.mode = "encoder"
    features = model(data_origin)
    model.module.mode = "meta"
    return get_logits(model, model.module.sfc, features, num_gpu, bs)

def get_logits(model, SFC, data, num_gpu, bs=64):
    """Run *data* through the SFC-conditioned model in chunks of *bs*.

    The SFC weights are replicated once per GPU so DataParallel can scatter
    them alongside every data chunk.
    """
    sfc_batch = SFC.unsqueeze(0).repeat(num_gpu, 1, 1, 1, 1)
    if len(data) <= bs:
        return model((sfc_batch, data))
    # Pre-allocate the output and fill it chunk by chunk.
    logits = torch.zeros(len(data), len(SFC)).cuda()
    for start in range(0, len(data), bs):
        stop = min(start + bs, len(data))
        logits[start:stop] = model((sfc_batch, data[start:stop]))
    return logits

def get_emd_distance(X, Y, model, gamma=None):
    """
    Kernel function prepared for label propagation.
    Args:
        X(numpy array): array of shape (n_samples, n_features) used to build
            the affinity matrix for the input data
        model(model): provides the API that computes the EMD distance
    Returns:
        Affinity matrix(array): matrix of shape (n_samples, n_samples)
    """
    X, Y = torch.from_numpy(X).cuda(), torch.from_numpy(Y).cuda()
    # randn only pre-allocates; every entry is overwritten by the loop below.
    distance_matrix = torch.randn(len(X), len(X))
    bs = 10
    rows = math.ceil(len(X) / bs)
    cols = math.ceil(len(Y) / bs)
    # Fill the distance matrix tile by tile (bs x bs) to bound GPU memory.
    for i in range(rows):
        for j in range(cols):
            distance_matrix[i*bs:min(len(X), (i+1) * bs), j*bs:min(len(Y), (j+1) * bs)] = \
                model.compute_emd_distance(Y[j*bs:min(len(Y), (j+1) * bs)], X[i*bs:min(len(X), (i+1) * bs)], solver='opencv')
    # distance_matrix = model.compute_emd_distance(X, Y, solver='opencv') # [n_samples, n_samples] distance matrix; must be converted to a weight matrix
    # Distance -> weight conversion follows scikit-learn's label-propagation
    # rbf kernel: K = exp(-gamma * D).
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    distance_matrix_numpy = distance_matrix.cpu().numpy()
    distance_matrix_numpy *= -gamma
    np.exp(distance_matrix_numpy, distance_matrix_numpy)  # exponentiate K in-place
    return distance_matrix_numpy


def sharpen(tensor, temperature):
    """Sharpen a probability distribution: p ** (1/T), renormalized per row."""
    powered = tensor ** (1.0 / temperature)
    return powered / powered.sum(1, keepdim=True)

def interleave_offsets(batch, nu):
    """Split *batch* into nu+1 near-equal groups; return cumulative offsets."""
    base = batch // (nu + 1)
    groups = [base] * (nu + 1)
    # Distribute the remainder over the trailing groups, one extra each.
    for extra in range(batch - base * (nu + 1)):
        groups[-extra - 1] += 1
    offsets = [0]
    for size in groups:
        offsets.append(offsets[-1] + size)
    assert offsets[-1] == batch
    return offsets

def interleave(xy, batch):
    """Interleave segments of the batches in *xy* (mixmatch BN trick).

    The i-th segment of the first batch is swapped with the i-th segment of
    the i-th batch, then each batch is re-concatenated.
    """
    nu = len(xy) - 1
    offsets = interleave_offsets(batch, nu)
    segmented = [[part[offsets[p]:offsets[p + 1]] for p in range(nu + 1)]
                 for part in xy]
    for i in range(1, nu + 1):
        segmented[0][i], segmented[i][i] = segmented[i][i], segmented[0][i]
    return [torch.cat(segments, dim=0) for segments in segmented]

def interleave_fixmatch(x, size):
    """
        Interleave along the batch axis (FixMatch variant).

        Differs from the mixmatch version in that the unlabeled/labeled ratio
        is not one-to-one, so a plain reshape/transpose suffices.
    """
    tail = list(x.shape)[1:]
    return x.reshape([-1, size] + tail).transpose(0, 1).reshape([-1] + tail)

def de_interleave_fixmatch(x, size):
    """
        Inverse operation of interleave_fixmatch.
    """
    tail = list(x.shape)[1:]
    return x.reshape([size, -1] + tail).transpose(0, 1).reshape([-1] + tail)

def freeze_bn(m):
    """
        For use with model.apply(func): stops every BatchNorm2d layer from
        updating its running statistics (the stored ones keep being used).
    """
    if isinstance(m, nn.BatchNorm2d):
        m.track_running_stats = False

def activate_bn(m):
    """
        For use with model.apply(func): re-enables running-statistics updates
        on every BatchNorm2d layer (undoes freeze_bn).
    """
    if isinstance(m, nn.BatchNorm2d):
        m.track_running_stats = True

def graph_dense_to_knn(graph, k):
    """Sparsify a dense distance graph (in place) to its k nearest neighbours.

    For every row, the k smallest entries are kept and all others zeroed.

    Args:
        graph(Tensor): [n, n] dense distance matrix; modified in place.
        k(int): number of neighbours to keep per row.

    Returns:
        (Tensor, Tensor): the sparsified graph and a boolean mask marking the
        zeroed entries.  (The original returned the bare graph when
        k > len(graph) and a tuple otherwise, so unpacking callers crashed;
        a uniform tuple is returned now.)
    """
    if k > len(graph):
        # Nothing to prune; no entry was removed.
        return graph, torch.zeros_like(graph).bool()
    _, indices = graph.topk(k, 1, largest=False)
    # Boolean keep-mask built with a vectorized scatter, not a Python loop.
    keep = torch.zeros_like(graph).bool()
    keep.scatter_(1, indices, True)
    graph[~keep] = 0
    return graph, ~keep

def freeze_layer(layer):
    """Disable gradients for every parameter of *layer*'s child modules.

    NOTE(review): parameters registered directly on *layer* itself (not on a
    child module) are not touched — confirm that is intended.
    """
    for child in layer.children():
        for weight in child.parameters():
            weight.requires_grad = False

def sample_denoise(args, support_feature, support_label, query_feature, query_label, SFC, model, num_gpu):
    """Accumulate per-sample losses while tuning a copy of SFC (ilpc label clean).

    A detached clone of SFC is fine-tuned for ``args.denoise_iters`` epochs
    while the mean cross-entropy of every (support + query) sample is
    accumulated; noisy samples tend to keep a high loss.

    Returns:
        (Tensor, list): per-sample loss statistics and the lr progression.
    """
    # clone().detach(): a plain detach() shares storage with SFC and caused bugs.
    SFC_temp = SFC.clone().detach()
    SFC_temp.requires_grad = True
    X = torch.cat((support_feature, query_feature), 0)
    Y = torch.cat((support_label, query_label), 0).type(torch.long)
    lr = args.sfc_lr

    optimizer = optim.SGD([{'params': [SFC_temp], "lr":lr, "dampening":0.9, "weight_decay": 0}], momentum=0.9, nesterov=True, weight_decay=0.0005)
    criterion = nn.CrossEntropyLoss(reduction = 'none')
    loss_statistics = torch.zeros(Y.shape, requires_grad=True).cuda()
    lr_progression = []
    for epoch in range(args.denoise_iters):
        output = get_logits(model, SFC_temp, X, num_gpu, bs=64)
        optimizer.zero_grad()
        loss_each = criterion(output, Y)
        loss_all = torch.mean(loss_each)
        loss_all.backward()
        # Average each sample's loss across all denoise iterations.
        loss_statistics = loss_statistics + loss_each/(args.denoise_iters)
        optimizer.step()
        lr_progression.append(optimizer.param_groups[0]['lr'])
    return loss_statistics, lr_progression

def sample_denoise_weight_imprint(args, support_feature, support_label, query_feature, query_label, SFC_temp, model, num_gpu):
    """Variant of sample_denoise that tunes an imprinted SFC under a cyclic LR.

    *SFC_temp* is expected to already require gradients.  Per-sample losses
    are accumulated across ``args.denoise_iters`` epochs.

    Returns:
        (Tensor, list): per-sample loss statistics and the lr progression.
    """
    X = torch.cat((support_feature, query_feature), 0)
    Y = torch.cat((support_label, query_label), 0).type(torch.long)
    start_lr = 1
    end_lr = 100
    cycle = 10  # epochs per LR cycle
    step_size_lr = (start_lr - end_lr) / cycle
    # Cyclic schedule: restarts every `cycle` epochs.
    lambda1 = lambda x: start_lr - (x % cycle)*step_size_lr

    optimizer = optim.SGD([{'params': [SFC_temp], "lr":1, "dampening":0.9, "weight_decay": 0}], momentum=0.9, nesterov=True, weight_decay=0.0005)
    scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda = lambda1)
    criterion = nn.CrossEntropyLoss(reduction = 'none')
    loss_statistics = torch.zeros(Y.shape, requires_grad=True).cuda()
    lr_progression = []
    for epoch in range(args.denoise_iters):
        output = get_logits(model, SFC_temp, X, num_gpu, bs=64)
        optimizer.zero_grad()
        loss_each = criterion(output, Y)
        loss_all = torch.mean(loss_each)
        loss_all.backward()
        # Average each sample's loss across all denoise iterations.
        loss_statistics = loss_statistics + loss_each/(args.denoise_iters)
        optimizer.step()
        scheduler_lr.step()
        lr_progression.append(optimizer.param_groups[0]['lr'])
    return loss_statistics, lr_progression

def print_unlabel_result(logits_rank):
    """Print, for each ranked entry, whether fields 2 and 1 differ by a multiple of 5."""
    flags = [(entry[2] - entry[1]) % 5 == 0 for entry in logits_rank]
    print(flags)


if __name__ == "__main__":
    # Smoke test: interleave three 4-row splits of a random (12, 10) tensor.
    a = torch.randn(12,10)
    b = torch.split(a, 4)
    interleave(b, 4)