import copy
import random
from collections import defaultdict

import numpy as np
import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader, Sampler


from timm.data.random_erasing import RandomErasing

from .dukemtmcreid import DukeMTMCreID
from .market1501 import Market1501,ImageDataset

import torch.distributed as dist


def train_collate_fn(batch):
    """Collate training samples into batched tensors.

    ``batch`` is a list (length = batch size) of per-sample tuples produced
    by the dataset's ``__getitem__``: (img, pid, camid, viewid, img_path).
    Images are stacked into a single tensor; the id fields become int64
    tensors; image paths are not needed during training and are discarded.
    """
    imgs, pids, camids, viewids, _ = zip(*batch)
    stacked_imgs = torch.stack(imgs, dim=0)
    pid_tensor = torch.tensor(pids, dtype=torch.int64)
    camid_tensor = torch.tensor(camids, dtype=torch.int64)
    viewid_tensor = torch.tensor(viewids, dtype=torch.int64)
    return stacked_imgs, pid_tensor, camid_tensor, viewid_tensor

def val_collate_fn(batch):
    """Collate validation samples into batched tensors.

    Camera ids are returned twice: once as the original Python tuple
    (``camids``) and once as an int64 tensor (``camids_batch``); image
    paths are passed through untouched.
    """
    imgs, pids, camids, viewids, img_paths = zip(*batch)
    stacked_imgs = torch.stack(imgs, dim=0)
    camids_batch = torch.tensor(camids, dtype=torch.int64)
    pid_tensor = torch.tensor(pids, dtype=torch.int64)
    viewid_tensor = torch.tensor(viewids, dtype=torch.int64)
    return stacked_imgs, pid_tensor, camids, camids_batch, viewid_tensor, img_paths
class RandomIdentitySampler(Sampler):
    """
    Randomly sample N identities, then for each identity,
    randomly sample K instances, therefore batch size is N*K.

    Args:
    - data_source (list): list of (img_path, pid, camid, trackid, ...) tuples.
    - batch_size (int): number of examples in a batch.
    - num_instances (int): number of instances per identity in a batch (K).
    """

    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        # K: samples drawn per identity in a batch.
        self.num_instances = num_instances
        # P: distinct identities needed per batch (batch_size // K).
        self.num_pids_per_batch = self.batch_size // self.num_instances
        # Map pid -> list of dataset indices for that identity, e.g.
        # {783: [0, 5, 116, 876, 1554], 205: [1, 10, 500], ...}.
        self.index_dic = defaultdict(list)
        for index, (_, pid, _, _, _) in enumerate(self.data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())

        # Estimate the number of examples in an epoch: each identity
        # contributes a multiple of num_instances samples; identities with
        # fewer than num_instances images are padded up by resampling.
        self.length = 0
        for pid in self.pids:
            num = len(self.index_dic[pid])
            if num < self.num_instances:
                num = self.num_instances
            self.length += num - num % self.num_instances

    def __iter__(self):
        # Pre-split every identity's (shuffled, possibly resampled)
        # indices into chunks of exactly num_instances.
        batch_idxs_dict = defaultdict(list)
        for pid in self.pids:
            idxs = copy.deepcopy(self.index_dic[pid])
            if len(idxs) < self.num_instances:
                # Too few images for this identity: resample with
                # replacement up to num_instances.
                idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.num_instances:
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []

        # Repeatedly draw num_pids_per_batch identities at random and pop
        # one chunk of num_instances indices from each; an identity is
        # removed from the pool once its chunks are exhausted.
        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []

        while len(avai_pids) >= self.num_pids_per_batch:
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[pid]) == 0:
                    avai_pids.remove(pid)
        # Fixed: removed a leftover `print(final_idxs)` that dumped the
        # entire epoch's index list to stdout on every epoch.
        return iter(final_idxs)

    def __len__(self):
        return self.length
class TripletSampler(Sampler):
    """
    自定义采样器，确保每个 batch 包含多个相同 ID 的样本，以便正确计算 TripletLoss。
    每个 batch 选取 N_IDENTITY 个不同行人，每个行人选取 M_IMAGES 张图片。采样时保证每个 batch 既包含正样本（相同 ID 的不同图片），也包含负样本（不同 ID）。
    """

    def __init__(self, dataset, batch_size, n_identity):
        super().__init__()
        print(f"TripletSampler初始化ing, id={id(self)}")  # 打印唯一 ID
        self.dataset = dataset  # 传入 Market1501 解析的数据集
        self.batch_size = batch_size  # 批次大小
        self.n_identity = n_identity  # 每个批次中包含的不同行人 ID 数量
        self.m_images = batch_size // n_identity  # 每个行人包含的图片数
        self.data_by_id = defaultdict(list)
        for idx, (_, pid, _ , _ , _) in enumerate(dataset):
            self.data_by_id[pid].append(idx)  # 按行人 ID 存储样本

        self.pids = list(self.data_by_id.keys())  # 获取所有行人 ID

    def __iter__(self):
        """
        生成器方法，使用 `yield` 按需生成 batch，而不是一次性返回所有数据。
        """
        #print("TripletSampler的__iter__执行中")
        random.shuffle(self.pids)  # 随机打乱行人 ID 顺序
        batch = []
        final_idxs=[]
        for pid in self.pids:
            # 选取 M_IMAGES 张图像，保证同一批次有多个相同 ID 的样本
            samples = random.sample(self.data_by_id[pid], min(self.m_images, len(self.data_by_id[pid])))
            #print(len(samples))
            while len(samples) < self.m_images:
                extra_sample = random.choice(self.data_by_id[pid])  # 从已有图片中随机选择一张
                samples.append(extra_sample)
            batch.extend(samples)# 获取索引
            if len(batch) >= self.batch_size:
                #print(f"Sampled batch: {batch}")  # Debug: 打印 batch 结果
                final_idxs.extend(batch[:self.batch_size])  # 生成当前批次
                batch = []  # 清空 batch，开始下一个批次
        return iter(final_idxs)
    def __len__(self):
        """
        返回总批次数量，方便 DataLoader 计算 epoch 内有多少个 batch。让for _ in dataloader:知道什么时候结束

        """
        return len(self.pids)*self.m_images  # 计算 batch 数量
def _build_reid_dataloaders(dataset):
    """Shared builder for train/val DataLoaders of a re-ID dataset.

    Factored out of make_dataloader / make_duke_dataloader, which were
    byte-for-byte duplicates apart from the dataset class.

    Args:
        dataset: a loaded dataset object exposing .train/.query/.gallery
            sample lists and num_train_pids / num_train_cams /
            num_train_vids / num_query_imgs counters
            (e.g. Market1501, DukeMTMCreID).

    Returns:
        (train_loader, val_loader, query_num, classes_num, cam_num, view_num)
        where query_num is used at evaluation time to split the combined
        query+gallery feature matrix.
    """
    train_transforms = T.Compose([
        # NOTE(review): integer interpolation codes are deprecated in
        # torchvision (3 == bicubic) — confirm the installed version
        # still accepts the int form.
        T.Resize([224, 224], interpolation=3),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop([224, 224]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        # NOTE(review): timm's RandomErasing is constructed with
        # device='cuda' but runs inside the CPU-side transform pipeline
        # (dataset __getitem__) — verify this works on CPU tensors and is
        # intentional.
        RandomErasing(probability=0.5, mode='pixel', max_count=1, device='cuda'),
        # RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
    ])

    val_transforms = T.Compose([
        T.Resize([224, 224]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    classes_num = dataset.num_train_pids
    cam_num = dataset.num_train_cams
    view_num = dataset.num_train_vids
    query_num = dataset.num_query_imgs

    train_set = ImageDataset(dataset.train, train_transforms)
    # TripletSampler drives PK batching; DataLoader batch_size must match
    # the sampler's batch size (32 = 4 identities x 8 images).
    train_loader = DataLoader(train_set, batch_size=32,
                              sampler=TripletSampler(train_set, 32, 4),
                              collate_fn=train_collate_fn)
    val_set = ImageDataset(dataset.query + dataset.gallery, val_transforms)
    val_loader = DataLoader(val_set, batch_size=32, shuffle=False,
                            collate_fn=val_collate_fn)
    return train_loader, val_loader, query_num, classes_num, cam_num, view_num


def make_dataloader():
    """Build train/val DataLoaders for Market-1501."""
    return _build_reid_dataloaders(Market1501())


def make_duke_dataloader():
    """Build train/val DataLoaders for DukeMTMC-reID."""
    return _build_reid_dataloaders(DukeMTMCreID())