import math
import sys
from copy import deepcopy

import torch
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm

from eval_func import eval_detection, eval_search_cuhk, eval_search_prw
from utils.utils import MetricLogger, SmoothedValue, mkdir, reduce_dict, warmup_lr_scheduler
import pdb
from models.mplp import MPLP
from torch.nn import functional as F

def to_device(images, targets, device):
    """Move a batch of images and their target dicts onto *device*.

    The image list is rebuilt (new list of device tensors); each target dict
    is mutated in place, its "boxes" and "labels" tensors replaced by device
    copies. Returns the (images, targets) pair.
    """
    moved = []
    for img in images:
        moved.append(img.to(device))
    for target in targets:
        for key in ("boxes", "labels"):
            target[key] = target[key].to(device)
    return moved, targets

def train_one_epoch(cfg, model, memory,criterion,optimizer, data_loader, device, epoch,
                    tfboard=None,gt_multilabel_source=None):
    """Train `model` for one epoch with a memory-bank multi-label re-id loss.

    Each iteration: the model returns detection losses plus box embeddings and
    pseudo pid labels; `memory` scores the embeddings against its lookup table
    (`sims` are logits), MPLP predicts (multi-)labels, and `criterion` turns
    them into `loss_box_reid`, scaled by ``cfg.SOLVER.LW_BOX_REID``.

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD, single_label,
            SOLVER.LW_BOX_REID, SOLVER.CLIP_GRADIENTS.
        model, memory, criterion, optimizer: training components.
        data_loader: yields (images, targets) batches.
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup runs only when it is 0.
        tfboard: optional tensorboard writer for per-iteration scalars.
        gt_multilabel_source: optional GT multilabels forwarded to MPLP
            (used only when ``cfg.single_label`` is falsy).
    """

    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup=cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        
        images, targets = to_device(images, targets, device)
        loss_dict, box_embeddings, box_pid_labels = model(images, targets)
        # memory forward(input_feats, single_class_label): sims are logits,
        # inputs the box features; the *_nid outputs carry the filtered
        # "no-id" entries and are unused below.
        sims,label,inputs,sims_nid,label_nid,inputs_nid= memory(box_embeddings,box_pid_labels,epoch)

        if len(inputs)!=0: # inputs: extracted box features; sims: logits
            if cfg.single_label:
                # print('use single label...')
                multilabel = labelpred.predict_single_label(memory.lut.detach().clone(),label.detach().clone())
            else:
                multilabel = labelpred.predict_ml_with_ssm_gt(memory.lut.detach().clone(),label.detach().clone(), epoch, gt_multilabel_source)
            
            # multi-label re-id classification loss
            loss_box_reid = criterion(sims, multilabel, True, memory.lut.detach().clone(),inputs)# sims and inputs keep grad

            loss_dict.update(loss_box_reid=loss_box_reid)
            loss_dict["loss_box_reid"] *= cfg.SOLVER.LW_BOX_REID

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        # print(i, sum(memory.lut[label[0]]))
        losses.backward()
        # print(i, sum(memory.lut[label[0]]))

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            # pdb.set_trace()
            # print(epoch, 'lr :', lr)
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)

def train_one_epoch_unio_oim(cfg, model, memory,criterion,optimizer, data_loader, device, epoch,
            tfboard,gt_multilabel=None,data_loader_source=None,
                             gt_multilabel_source=None,memory_s=None):
    """Train one epoch with the memory-bank multi-label re-id loss (OIM variant).

    Per iteration the model is run with ``mem_id2GtID=False`` (loss_dict then
    holds only the detection losses, no re-id), the `memory` bank scores the
    box embeddings (`sims` are logits), MPLP predicts multi-labels — from GT
    ids when `gt_multilabel` is given, otherwise unsupervised — and
    `criterion` produces ``loss_box_reid``.

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD, SINGLE_LABEL,
            SOLVER.CLIP_GRADIENTS.
        model, memory, criterion, optimizer: training components.
        data_loader: yields (images, targets) batches.
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup runs only when it is 0.
        tfboard: optional tensorboard writer.
        gt_multilabel: optional GT multilabels; presence switches MPLP to
            `predict_gtids_mul`.
        data_loader_source, gt_multilabel_source, memory_s: retained for
            backward compatibility; they fed a source-domain sub-step that was
            dead code (guarded by a hard-coded ``use_unio = False``) and has
            been removed.
    """
    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup = cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        images, targets = to_device(images, targets, device)
        # loss_dict holds only the detection losses here (no re-id); the model
        # computes OIM only when mem_id2GtID=True.
        loss_dict, box_embeddings, box_pid_labels = model(images, targets,query_img_as_gallery=False,mem_id2GtID=False)
        # memory forward(input_feats, single_class_label): sims = logits,
        # label = single-ID labels, inputs = features; the *_nid triple
        # carries the filtered "no-id" entries and is unused here.
        sims,label,inputs,sims_nid,label_nid,inputs_nid= memory(box_embeddings,box_pid_labels,epoch)
        if len(inputs)!=0: # inputs: extracted box features; sims: logits
            use_gtids = gt_multilabel is not None

            if use_gtids:
                multilabel= labelpred.predict_gtids_mul(memory.lut.detach().clone(),
                            label.detach().clone(),epoch,gt_multilabel,use_un_oem=cfg.SINGLE_LABEL)
            else:
                multilabel = labelpred.predict_ml(memory.lut.detach().clone(), label.detach().clone(),epoch)

            # multi-label re-id classification loss (sims and inputs keep grad)
            loss_box_reid = criterion(sims, multilabel, True, memory.lut.detach().clone(),inputs)

            loss_dict.update(loss_box_reid=loss_box_reid)

        # NOTE: an unreachable source-domain sub-step (`use_unio` hard-coded
        # to False) was removed here; it never executed in any revision.

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)


def train_one_epoch_prw_with_ssm(cfg, model, memory,criterion,optimizer, data_loader, device, epoch,tfboard,gt_multilabel=None):
    """Train one epoch (PRW) using SSM GT multilabels for the re-id loss.

    The model runs with ``mem_id2GtID=False`` so loss_dict carries only the
    detection losses; the re-id loss is added from the `memory` bank via
    MPLP's `predict_ml_with_ssm_gt` and `criterion`.

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD,
            SOLVER.CLIP_GRADIENTS.
        model, memory, criterion, optimizer: training components.
        data_loader: yields (images, targets) batches.
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup runs only when it is 0.
        tfboard: optional tensorboard writer.
        gt_multilabel: GT multilabels forwarded to MPLP.
    """
    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup=cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        images, targets = to_device(images, targets, device)
        # loss_dict holds only the detection losses (no re-id); the model
        # computes OIM only when mem_id2GtID=True.
        loss_dict, box_embeddings, box_pid_labels = model(images, targets,query_img_as_gallery=False,mem_id2GtID=False)
        # sims = logits, label = single-ID labels, inputs = features; the
        # *_nid triple filters the "no-id" entries and is unused here.
        sims,label,inputs,sims_nid,label_nid,inputs_nid= memory(box_embeddings,box_pid_labels,epoch)# forward(input_feats, single_class_label)
        if len(inputs)!=0: # inputs: extracted box features; sims: logits

            multilabel= labelpred.predict_ml_with_ssm_gt(memory.lut.detach().clone(),
                            label.detach().clone(),epoch,gt_multilabel)
            # multi-label re-id classification loss
            loss_box_reid = criterion(sims, multilabel, True, memory.lut.detach().clone(),inputs)# sims and inputs keep grad

            loss_dict.update(loss_box_reid=loss_box_reid)

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        # print(i, sum(memory.lut[label[0]]))
        losses.backward()
        # print(i, sum(memory.lut[label[0]]))

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            # pdb.set_trace()
            # print(epoch, 'lr :', lr)
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)

def train_one_epoch_prw_with_ssm_oim(cfg, model, memory,criterion,optimizer, data_loader,
                                     device, epoch,tfboard,gt_multilabel=None,mem_id2GtID=None):
    """Train one epoch (PRW, SSM) combining the model's OIM re-id loss with
    the memory-bank multi-label loss.

    Unlike `train_one_epoch_prw_with_ssm`, the caller-supplied `mem_id2GtID`
    is forwarded to the model and the memory-bank loss is ADDED into the
    existing ``loss_dict['loss_box_reid']`` entry rather than creating it.

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD,
            SOLVER.CLIP_GRADIENTS.
        model, memory, criterion, optimizer: training components.
        data_loader: yields (images, targets) batches.
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup runs only when it is 0.
        tfboard: optional tensorboard writer.
        gt_multilabel: GT multilabels forwarded to MPLP.
        mem_id2GtID: forwarded to the model; presumably enables its OIM loss.
    """
    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup=cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        images, targets = to_device(images, targets, device)
        # When mem_id2GtID is falsy loss_dict holds only the 5 detection
        # losses; OIM re-id is computed by the model only when it is truthy.
        loss_dict, box_embeddings, box_pid_labels = model(images, targets,query_img_as_gallery=False,mem_id2GtID=mem_id2GtID)
        # sims = logits, label = single-ID labels, inputs = features; the
        # *_nid triple filters the "no-id" entries and is unused here.
        sims,label,inputs,sims_nid,label_nid,inputs_nid= memory(box_embeddings,box_pid_labels,epoch)# forward(input_feats, single_class_label)
        if len(inputs)!=0: # inputs: extracted box features; sims: logits

            multilabel= labelpred.predict_ml_with_ssm_gt(memory.lut.detach().clone(),
                            label.detach().clone(),epoch,gt_multilabel)
            # multi-label re-id classification loss
            loss_box_reid = criterion(sims, multilabel, True, memory.lut.detach().clone(),inputs)# sims and inputs keep grad
            # NOTE(review): assumes the model already put 'loss_box_reid' in
            # loss_dict (i.e. mem_id2GtID was truthy); otherwise this raises
            # KeyError — confirm against callers.
            loss_dict['loss_box_reid']+=loss_box_reid

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        # print(i, sum(memory.lut[label[0]]))
        losses.backward()
        # print(i, sum(memory.lut[label[0]]))

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            # pdb.set_trace()
            # print(epoch, 'lr :', lr)
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)


def train_one_epoch_unio(cfg, model, memory,criterion,optimizer, data_loader, device, epoch,
                    tfboard=None,gt_multilabel=None,data_loader_source=None,
                         gt_multilabel_source=None,memory_s=None):
    """Jointly train one epoch on the target set and a source set.

    Per iteration: a target batch from `data_loader` produces detection losses
    plus a multi-label re-id loss via MPLP over `memory`; a source batch
    (cycled with its own iterator over `data_loader_source`) produces the same
    losses via `memory_s` with GT multilabels. The shared detection losses of
    both domains are summed key-wise, both re-id losses are added, and a
    single optimizer step is taken.

    Fixes vs. the previous revision:
      * removed a leftover ``pdb.set_trace()`` that stopped every iteration;
      * ``loss_box_reid`` is now registered only when it was actually
        computed — previously an empty ``inputs`` batch raised
        ``UnboundLocalError`` at the unconditional ``loss_dict.update``.

    Args:
        cfg: config node; reads THRE_SIM, WARMUP, DISP_PERIOD,
            SOLVER.CLIP_GRADIENTS.
        model, memory, criterion, optimizer: training components.
        data_loader: target-domain loader (drives the epoch length).
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup runs only when it is 0.
        tfboard: optional tensorboard writer.
        gt_multilabel: optional target GT multilabels (switches MPLP to
            `predict_gtids_mul`).
        data_loader_source: source-domain loader, cycled per iteration.
        gt_multilabel_source: source GT multilabels for MPLP.
        memory_s: source-domain memory bank.
    """
    labelpred = MPLP(cfg.THRE_SIM,cfg)
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    warmup = cfg.WARMUP
    if epoch == 0 and warmup:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        images, targets = to_device(images, targets, device)
        loss_dict, box_embeddings, box_pid_labels = model(images, targets)
        # memory forward(input_feats, single_class_label): sims = logits,
        # inputs = box features; the *_nid triple is unused here.
        sims, label, inputs, sims_nid, label_nid, inputs_nid = memory(box_embeddings, box_pid_labels, epoch)
        # Target-domain re-id loss; left as None when this batch produced no
        # usable box features (fixes the former UnboundLocalError).
        loss_box_reid = None
        if len(inputs) != 0:  # inputs: extracted box features; sims: logits
            use_gtids = gt_multilabel is not None

            if use_gtids:
                multilabel = labelpred.predict_gtids_mul(memory.lut.detach().clone(),
                            label.detach().clone(), epoch, gt_multilabel)
            else:
                multilabel = labelpred.predict_ml(memory.lut.detach().clone(), label.detach().clone(), epoch)

            # multi-label re-id classification (sims and inputs keep grad)
            loss_box_reid = criterion(sims, multilabel, True, memory.lut.detach().clone(), inputs)

        # source-domain loss: restart the source iterator whenever it would
        # be exhausted, so the source loader is cycled indefinitely
        if i % len(data_loader_source) == 0:
            data_loader_p_iter = iter(data_loader_source)
        images_s, targets_s = next(data_loader_p_iter)
        images_s, targets_s = to_device(images_s, targets_s, device)
        loss_dict_s, box_embeddings_s, box_pid_labels_s = model(images_s, targets_s)
        sims_s, label_s, inputs_s, sims_nid, label_nid, inputs_nid = memory_s(box_embeddings_s, box_pid_labels_s,
                                                                      epoch)  # forward(input_feats, single_class_label)
        multilabel_s = labelpred.predict_gtids_mul(memory_s.lut.detach().clone(),
                                                 label_s.detach().clone(), epoch, gt_multilabel_source)
        # source-domain multi-label re-id classification
        loss_box_reid_s = criterion(sims_s, multilabel_s, True, memory_s.lut.detach().clone(),
                                  inputs_s)  # sims and inputs keep grad

        # sum the shared detection losses of both domains key-wise
        for k, v in loss_dict.items():
            loss_dict[k] = v + loss_dict_s[k]

        # register re-id losses AFTER the key-wise sum (loss_dict_s has no
        # matching entries for them)
        if loss_box_reid is not None:
            loss_dict.update(loss_box_reid=loss_box_reid)
        loss_dict.update(loss_box_reid_s=loss_box_reid_s)

        # sum loss
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()

        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0 and warmup:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)


def train_one_epoch_p(cfg, model, optimizer, data_loader, device, epoch, tfboard, data_loader_p):
    """Train one epoch, mixing an auxiliary batch into every main batch.

    Each iteration pulls one batch from `data_loader_p` (cycled with its own
    iterator) and concatenates it onto the `data_loader` batch before the
    forward pass; the model here returns only a loss dict.

    Args:
        cfg: config node; reads DISP_PERIOD and SOLVER.CLIP_GRADIENTS.
        model, optimizer: training components.
        data_loader: main loader; drives the epoch length.
        device: device to move each batch onto.
        epoch: current epoch index; LR warmup always runs when it is 0
            (no WARMUP flag here, unlike the other loops in this file).
        tfboard: optional tensorboard writer.
        data_loader_p: auxiliary loader mixed into every batch.
    """
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    if epoch == 0:
        warmup_factor = 1.0 / 1000
        # FIXME: min(1000, len(data_loader) - 1)
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for i, (images, targets) in enumerate(
            metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        # print(targets[0])
        # restart the auxiliary iterator whenever it would be exhausted
        if i % len(data_loader_p) == 0:
            data_loader_p_iter = iter(data_loader_p)
        images_p, targets_p = next(data_loader_p_iter)
        images_p, targets_p = to_device(images_p, targets_p, device)
        images, targets = to_device(images, targets, device)
        # concatenate the auxiliary batch onto the main batch
        images = images + images_p
        targets = targets + targets_p

        loss_dict = model(images, targets)
        # free batch references early to reduce peak memory before backward
        del images, images_p, targets, targets_p
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        if epoch == 0:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            iter_n = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter_n)
            lr = optimizer.param_groups[0]["lr"]
            # pdb.set_trace()
            # print(epoch, 'lr :', lr)
            tfboard.add_scalars('lr', {'lr-epoch': lr}, iter_n)


@torch.no_grad()
def evaluate_performance(
        model, gallery_loader, query_loader, device, use_gt=False, use_cache=False, use_cbgm=False
):
    """Evaluate detection and person-search performance.

    Extracts gallery detections/features, query-image detections/features and
    query-box features (or loads them from a cache file), then runs detection
    evaluation and the dataset-specific search evaluation.

    Args:
        use_gt (bool, optional): Whether to use GT as detection results to verify the upper
                                bound of person search performance. Defaults to False.
        use_cache (bool, optional): Whether to use the cached features. Defaults to False.
        use_cbgm (bool, optional): Whether to use Context Bipartite Graph Matching algorithm.
                                Defaults to False.

    Returns:
        Tuple of (eval_all, eval_labeled, eval_reid) — the detection results
        on all / labeled-only boxes and the search evaluation result.
    """
    model.eval()
    if use_cache:
        # NOTE(review): cache path is hard-coded relative to the CWD —
        # must match the path written at the end of the extraction branch.
        eval_cache = torch.load("../eval_cache.pth")
        gallery_dets = eval_cache["gallery_dets"]
        gallery_feats = eval_cache["gallery_feats"]
        query_dets = eval_cache["query_dets"]
        query_feats = eval_cache["query_feats"]
        query_box_feats = eval_cache["query_box_feats"]
    else:
        gallery_dets, gallery_feats = [], []
        for images, targets in tqdm(gallery_loader, ncols=0):
            # pdb.set_trace();use_gt=True
            images, targets = to_device(images, targets, device)
            if not use_gt:
                outputs = model(images)
            else:
                # feed GT boxes through the model to get their embeddings,
                # simulating a perfect detector (assumes batch size 1 here)
                boxes = targets[0]["boxes"]
                n_boxes = boxes.size(0)
                embeddings = model(images, targets)
                outputs = [
                    {
                        "boxes": boxes,
                        "embeddings": torch.cat(embeddings),
                        "labels": torch.ones(n_boxes).to(device),
                        "scores": torch.ones(n_boxes).to(device),
                    }
                ]

            for output in outputs:
                # store each detection as [x1, y1, x2, y2, score]
                box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
                gallery_dets.append(box_w_scores.cpu().numpy())
                gallery_feats.append(output["embeddings"].cpu().numpy())

        # regarding query image as gallery to detect all people
        # i.e. query person + surrounding people (context information)
        query_dets, query_feats = [], []
        for images, targets in tqdm(query_loader, ncols=0):
            images, targets = to_device(images, targets, device)
            # targets will be modified in the model, so deepcopy it
            outputs = model(images, deepcopy(targets), query_img_as_gallery=True)

            # consistency check
            gt_box = targets[0]["boxes"].squeeze()
            assert (
                           gt_box - outputs[0]["boxes"][0]
                   ).sum() <= 0.001, "GT box must be the first one in the detected boxes of query image"

            for output in outputs:
                box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
                query_dets.append(box_w_scores.cpu().numpy())
                query_feats.append(output["embeddings"].cpu().numpy())

        # extract the features of query boxes
        query_box_feats = []
        for images, targets in tqdm(query_loader, ncols=0):
            images, targets = to_device(images, targets, device)
            embeddings = model(images, targets)
            assert len(embeddings) == 1, "batch size in test phase should be 1"
            query_box_feats.append(embeddings[0].cpu().numpy())

        # persist everything so a later run with use_cache=True can skip
        # feature extraction entirely
        save_dict = {
            "gallery_dets": gallery_dets,
            "gallery_feats": gallery_feats,
            "query_dets": query_dets,
            "query_feats": query_feats,
            "query_box_feats": query_box_feats,
        }
        torch.save(save_dict, "../eval_cache.pth")

    eval_all = eval_detection(gallery_loader.dataset, gallery_dets, det_thresh=0.1, iou_thresh=0.5)
    eval_labeled = eval_detection(gallery_loader.dataset, gallery_dets, det_thresh=0.1, iou_thresh=0.5,
                                  labeled_only=True)
    # pick the search-evaluation protocol matching the dataset
    eval_search_func = (
        eval_search_cuhk if gallery_loader.dataset.name == "CUHK-SYSU" else eval_search_prw
    )
    eval_reid = eval_search_func(
        gallery_loader.dataset,
        query_loader.dataset,
        gallery_dets,
        gallery_feats,
        query_box_feats,
        query_dets,
        query_feats,
        cbgm=use_cbgm,
    )
    return eval_all, eval_labeled, eval_reid
