# -*- coding=utf-8 -*-
"""
@author: xingwg
@license: (C) Copyright 2020-2025.
@contact: xingweiguo@chinasvt.com
@project: boya-reid
@file: data_factory.py
@time: 2020/9/12 19:51
@desc:
"""
import glog
from PIL import Image

import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader

from .naic import NAIC
from .base_dataset import ImageDataset
from .data_augmentation import (
    RandomErasing,
    RandomShift,
)
from .data_sampler import (
    RandomIdentitySampler,
    RandomIdentitySamplerV2,
)

# Registry mapping the dataset name from the config (cfg.DATASETS.NAMES)
# to its dataset class; looked up in make_dataloader() below.
__dataset_map = {
    "naic": NAIC
}


def train_collate_fn(batch):
    """Collate a training mini-batch.

    ``batch`` is a list of ``__getitem__`` results, each a tuple of
    ``(image_tensor, person_id, cam_id, img_path)``. Camera ids and image
    paths are not needed for training and are dropped.

    Returns:
        tuple: (stacked image tensor of shape (B, ...), int64 label tensor).
    """
    images = torch.stack([sample[0] for sample in batch], dim=0)
    labels = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    return images, labels


def val_collate_fn(batch):
    """Collate a validation mini-batch.

    Unlike training, person ids, camera ids and image paths are kept (as
    tuples) because evaluation needs them for matching and reporting.

    Returns:
        tuple: (stacked image tensor, person_ids, cam_ids, img_paths).
    """
    columns = tuple(zip(*batch))
    stacked_images = torch.stack(columns[0], dim=0)
    return (stacked_images,) + columns[1:]


def make_dataloader(cfg):
    """Build the training and validation dataloaders from the config.

    Args:
        cfg: experiment config node; reads DATASETS.*, INPUT.*,
            DATALOADER.*, SOLVER.IMS_PER_BATCH and TEST.IMS_PER_BATCH.

    Returns:
        tuple: ``(train_loader, val_loader, num_query, num_classes)`` where
        ``num_query`` is the query-set size (the validation set is the
        concatenation query+gallery, split downstream at that index) and
        ``num_classes`` is the number of training person identities.

    Note:
        ``train_loader`` stays ``None`` when cfg.DATALOADER.SAMPLER is
        neither "softmax" nor contains "triplet"; glog.fatal is emitted.
    """
    if cfg.DATASETS.HARD_AUG:
        # Heavier augmentation: random shift and color jitter in addition
        # to flip, before normalization and random erasing.
        train_transforms = T.Compose([
            RandomShift(
                prob=0.5,
                rc_min=0.7,
                rh_min=0.5,
                rw_min=0.5,
                mean=cfg.INPUT.PIXEL_MEAN
            ),
            T.Resize(
                size=cfg.INPUT.SIZE_TRAIN,
                interpolation=Image.BILINEAR
            ),
            T.RandomHorizontalFlip(
                p=cfg.INPUT.PROB
            ),
            T.ColorJitter(
                brightness=0.5,
                contrast=0.5,
                saturation=0.5,
                hue=0.2
            ),
            T.ToTensor(),
            T.Normalize(
                mean=cfg.INPUT.PIXEL_MEAN,
                std=cfg.INPUT.PIXEL_STD
            ),
            RandomErasing(
                probability=cfg.INPUT.RE_PROB,
                mean=cfg.INPUT.PIXEL_MEAN
            )
        ])
    else:
        # Standard pipeline: resize, flip, pad + random crop jitter.
        train_transforms = T.Compose([
            T.Resize(
                size=cfg.INPUT.SIZE_TRAIN,
                interpolation=Image.BILINEAR
            ),
            T.RandomHorizontalFlip(
                p=cfg.INPUT.PROB
            ),
            T.Pad(
                cfg.INPUT.PADDING
            ),
            T.RandomCrop(
                cfg.INPUT.SIZE_TRAIN
            ),
            T.ToTensor(),
            T.Normalize(
                mean=cfg.INPUT.PIXEL_MEAN,
                std=cfg.INPUT.PIXEL_STD
            ),
            RandomErasing(
                probability=cfg.INPUT.RE_PROB,
                mean=cfg.INPUT.PIXEL_MEAN
            )
        ])

    # Validation uses only deterministic transforms.
    val_transforms = T.Compose([
        T.Resize(
            size=cfg.INPUT.SIZE_TRAIN,
            interpolation=Image.BILINEAR
        ),
        T.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN,
            std=cfg.INPUT.PIXEL_STD
        )
    ])

    num_workers = cfg.DATALOADER.NUM_WORKERS

    dataset = __dataset_map[cfg.DATASETS.NAMES](data_dir=cfg.DATASETS.ROOT_DIR)
    num_classes = dataset.num_train_person_ids

    train_set = ImageDataset(dataset.train_dataset, train_transforms)

    train_loader = None
    if "triplet" in cfg.DATALOADER.SAMPLER:
        glog.info("using triplet sampler")
        train_loader = DataLoader(
            train_set,
            batch_size=cfg.SOLVER.IMS_PER_BATCH,
            # PK sampling: each batch holds IMS_PER_BATCH / NUM_INSTANCE
            # identities with NUM_INSTANCE images each.
            sampler=RandomIdentitySamplerV2(
                dataset.train_dataset,
                cfg.SOLVER.IMS_PER_BATCH,
                cfg.DATALOADER.NUM_INSTANCE
            ),
            pin_memory=True,
            num_workers=num_workers,
            collate_fn=train_collate_fn
        )
    elif cfg.DATALOADER.SAMPLER == "softmax":
        glog.info("using softmax sampler")
        train_loader = DataLoader(
            train_set,
            batch_size=cfg.SOLVER.IMS_PER_BATCH,
            shuffle=True,
            pin_memory=True,
            num_workers=num_workers,
            collate_fn=train_collate_fn
        )
    else:
        # BUG FIX: the message previously read cfg.SAMPLER, which does not
        # exist and itself raised AttributeError instead of logging.
        glog.fatal("unsupported sampler! expected softmax or triplet but got {}".format(cfg.DATALOADER.SAMPLER))

    # query and gallery are evaluated together; the caller splits them back
    # apart using the returned query-set length.
    val_set = ImageDataset(dataset.query_dataset + dataset.gallery_dataset, val_transforms)
    val_loader = DataLoader(
        val_set,
        batch_size=cfg.TEST.IMS_PER_BATCH,
        shuffle=False,
        pin_memory=True,
        num_workers=num_workers,
        collate_fn=val_collate_fn
    )

    return train_loader, val_loader, len(dataset.query_dataset), num_classes
