import math
import sys
from copy import deepcopy
from PIL.Image import new
import torchvision
# from torchvision.transforms.functional
import torch
from torch.nn.utils import clip_grad_norm_
from utils.utils import MetricLogger, SmoothedValue, mkdir, reduce_dict, warmup_lr_scheduler
from models.mplp import MPLP
from torch.nn import functional as F

def to_device(images, targets, device):
    """Transfer a batch of images and their detection targets to *device*.

    Args:
        images: iterable of image tensors; a new list of moved tensors is built.
        targets: list of dicts whose "boxes" and "labels" tensors are moved
            in place (the same dict objects are returned).
        device: torch device (or device string) to move tensors onto.

    Returns:
        Tuple ``(images, targets)`` with all tensors on *device*.
    """
    moved_images = []
    for image in images:
        moved_images.append(image.to(device))
    for target in targets:
        for key in ("boxes", "labels"):
            target[key] = target[key].to(device)
    return moved_images, targets

def train_one_epoch_two_path(cfg, model, memory,criterion,optimizer, data_loader, device, epoch,
                    tfboard=None,gt_multilabel_source=None):
    """Run one training epoch of the two-path (search image + instance crop) pipeline.

    Per iteration the model is invoked twice: once on the full search images
    (detection + re-ID losses) and once on the per-person crops popped from
    each target's ``img_crops`` (instance-feature extraction only). The two
    feature sets are aligned by person label and combined into an instance
    consistency loss (``loss_ins``), an optional pairwise-structure loss
    (``loss_pr``), and a memory-bank re-ID loss (``loss_box_reid``).

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD, single_label,
            SOLVER.LW_BOX_REID and SOLVER.CLIP_GRADIENTS.
        model: two-path detector; returns (loss_dict, box_embeddings,
            box_pid_labels) on search images and instance features on crops.
        memory: feature memory bank; called as memory(features, labels, epoch)
            and exposes a ``lut`` tensor of per-identity prototypes.
        criterion: re-ID loss taking (sims, multilabel, flag, lut, inputs).
        optimizer: torch optimizer stepped once per iteration.
        data_loader: yields (images, targets); each target dict carries an
            'img_crops' list which is popped (removed) here.
        device: torch device for images, targets and crop tensors.
        epoch: current epoch index; warmup LR runs only when epoch == 0.
        tfboard: optional TensorBoard writer for per-iteration scalars.
        gt_multilabel_source: optional GT multi-label source fed to MPLP.
    """
    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup=cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):  
        # Multiple batches are handled here; the order of targets must NOT be changed,
        # since crops are later matched back to labels by position.
        img_crops=[t.pop('img_crops') for t in targets]
        crop_batch=[]
        for t in img_crops:
            crop_batch=crop_batch+t
        # Convert each crop (PIL image) to a tensor on the target device.
        crop_batch_tensor =[torchvision.transforms.functional.to_tensor(cc).to(device) for cc in crop_batch]

        # Every crop gets one full-crop box; 64x190 presumably matches the crop
        # size produced by the data pipeline — TODO confirm against the dataset code.
        boxes_ins=[[[0,0,64,190]] for i in range(len(crop_batch_tensor))]
        labels_ins = [t['labels'] for t in targets]
        labels_ins =torch.cat(labels_ins).to(device)
        targets_ins = [{'boxes':torch.Tensor(ins_box).to(device),'labels':ins_label} for ins_box,ins_label in zip(boxes_ins,labels_ins)]

        images, targets = to_device(images, targets, device)

        #------------------------------loss_ns-----------------------------
        # Two-path input: search images produce detection losses plus box
        # embeddings/labels; the crop path extracts instance features only.
        loss_dict, box_embeddings, box_pid_labels=model(images,targets)
        ins_fb=model(crop_batch_tensor,targets_ins,query_img_as_gallery=False,extract_ins_feature=True)

        # loss_dict, box_embeddings, box_pid_labels = model(images, targets)
        # Candidate features after background filtering; when computing similarity
        # against the memory, indices must be shifted down by 1 (labels are 1-based).
        # Only the search-path outputs feed the later L_ins computation.
        fab_up_mem = torch.cat((box_embeddings,ins_fb))
        lab_up_mem = box_pid_labels+[labels_ins]
        sims,label,inputs= memory(box_embeddings,box_pid_labels,epoch)
        sims_ab,labels_ab,inputs_ab= memory(fab_up_mem,lab_up_mem,epoch)

        # Distance loss between instance (crop) features and search features:
        # pull positive pairs of the same identity closer together.
        # For duplicated search inputs keep only one (after averaging one could
        # renormalize: lut[y] /= lut[y].norm()).

        # Two cases:
        # 1) sorted ins labels match the deduplicated+sorted search labels in
        #    length (multi-detections removed) -> matrix-multiply directly.
        # 2) lengths differ after sorting (the detector missed some persons) ->
        #    keep only the overlapping part, driven by the shorter search side.

        # Deduplicate the search labels, then sort.
        no_repeat_ind=[]
        new_label_no_repeat=[]
        for lab in label:
            if lab not in new_label_no_repeat:
                new_label_no_repeat.append(lab)
                no_repeat_ind.append(True)
            else:
                no_repeat_ind.append(False)
        new_inputs=inputs[no_repeat_ind]
        new_label = label[no_repeat_ind]
        lab_a,indices = new_label.sort()
        fa=new_inputs[indices]
        
        # Sort the instance labels (no dedup needed: one crop per annotation).
        # NOTE(review): the -1 offset is applied AFTER memory(fab_up_mem, ...)
        # above, so the memory call sees 1-based labels — presumably memory
        # handles the offset itself; verify.
        labels_ins=labels_ins-1 # shift to the memory LUT index
        lab_b,indices_ins = labels_ins.sort()
        fb=ins_fb[indices_ins]

        # Case 1: equal label counts — pairwise similarities via direct mm.
        if lab_a.shape==lab_b.shape:
            pr_sim=fa.mm(fb.t())
        # Case 2: unequal — keep only ins entries whose label also appears in
        # lab_a (the shorter search side), then re-slice fb and mm.
        else:
            print('repeat ... ....')
            lab_b_no_repeat_ind=[]
            for l in lab_b:
                if l in lab_a:
                    lab_b_no_repeat_ind.append(True)
                else:
                    lab_b_no_repeat_ind.append(False)
            lab_b=lab_b[lab_b_no_repeat_ind]
            fb   =fb[lab_b_no_repeat_ind]
            assert len(lab_b)==len(lab_a),"repeat crop "
            pr_sim=fa.mm(fb.t())
        
        # Compute loss_ins. Only one of the two variants below is active
        # (flags are hard-coded; Euclidean variant is currently dead code).
        loss_ins = []
        use_OuShi=False   # Euclidean-distance variant (disabled)
        use_sim_to_1=True # push diagonal (positive-pair) similarity toward 1
        if use_OuShi:
            # NOTE(review): this inner loop reuses `i`, shadowing the enumerate
            # index used at the `if i==0` print below — latent bug if re-enabled.
            for i in range(len(labels_ins)):
                fa=new_inputs[i]
                fb=ins_fb[i]
                cha_2 = torch.pow(fa - fb, 2).sum()
                dd_loss = torch.sqrt(cha_2)
                loss_ins.append(dd_loss)
            loss_ins = torch.mean(torch.stack(loss_ins))
        if use_sim_to_1:
            for k in range(len(lab_b)):
                sim=pr_sim[k][k]
                l = 2*torch.mean((1 - sim).pow(2)) # scale the ins loss by 2 to see its effect
                loss_ins.append(l)
            loss_ins = torch.mean(torch.stack(loss_ins))

        loss_dict.update(loss_ins=loss_ins)
        #----------------------------------------loss_pr---------------------------
        # pr_sim: drive negative-pair similarity structure to match between the
        # two paths (Gram matrices of search vs. instance features).
        use_loss_pr=True
        if i==0:
            print('use_loss_pr',use_loss_pr)
        
        if use_loss_pr:
            sa= fa.mm(fa.t())
            # sa/=sa.norm().flatten()
            sb= fb.mm(fb.t())
            # sb/=sb.norm().flatten()
            # loss_pr = torch.nn.functional.kl_div(sa.abs(),sb.abs())+torch.nn.functional.kl_div(sb,sa)
            loss_pr = torch.mean((sa - sb).pow(2))
            loss_dict.update(loss_pr=loss_pr)

        #-----------------------------------------reid -----------------------------
        if len(inputs)!=0: # inputs are the extracted box features; sims are the logits
            if cfg.single_label:
                # print('use single label...')
                multilabel = labelpred.predict_single_label(memory.lut.detach().clone(),labels_ab.detach().clone())
            else:
                multilabel = labelpred.predict_ml_with_ssm_gt(memory.lut.detach().clone(),labels_ab.detach().clone(), epoch, gt_multilabel_source)
            
            # Multi-label re-ID classification over the memory bank.
            loss_box_reid = criterion(sims_ab, multilabel, True, memory.lut.detach().clone(),inputs_ab)# sims and inputs carry grad

            loss_dict.update(loss_box_reid=loss_box_reid)
            loss_dict["loss_box_reid"] *= cfg.SOLVER.LW_BOX_REID


        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        # print(i, sum(memory.lut[label[0]]))
        losses.backward()
        # print(i, sum(memory.lut[label[0]]))

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            # pdb.set_trace()
            # print(epoch, 'lr :', lr)
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)
