from torch.utils.data import Dataset,DataLoader
import os
from PIL import Image
import torch
from torchvision.transforms import Compose,Resize,ToTensor,Normalize

class Market1501(Dataset):
    """Market-1501 person re-identification dataset.

    Expected layout under ``data_dir``:
        query/               -- query images (test mode)
        bounding_box_test/   -- gallery images (test mode)
        bounding_box_train/  -- training images (train mode)

    File names look like ``0002_c1s1_000451_03.jpg``: raw person id,
    then a 4-char camera/sequence token ``c<cam>s<seq>``. Distractor
    images with person id ``-1`` are skipped.

    Args:
        pre_process: optional callable applied to each PIL image.
        data_dir: dataset root directory.
        extra_annotations: unused, kept for interface compatibility.
        mode: 'test' (query + gallery) or 'train'.
        color_space: PIL conversion mode, e.g. 'RGB'.
    """
    def __init__(self, pre_process=None, data_dir=None, extra_annotations=None, mode='test', color_space='RGB'):
        super().__init__()
        assert mode in ['test', 'train']
        self.mode = mode
        query_dir = os.path.join(data_dir, 'query')
        test_dir = os.path.join(data_dir, 'bounding_box_test')
        train_dir = os.path.join(data_dir, 'bounding_box_train')

        # First pass: collect all person ids so labels are contiguous and
        # consistent across query/gallery (or across the training split).
        if self.mode == 'test':
            _dirs = [query_dir, test_dir]
        else:
            _dirs = [train_dir]

        person_id_container = set()
        for directory in _dirs:
            for file in os.listdir(directory):
                if file.endswith(('.png', '.jpg', '.jpeg')):
                    person_id = int(file.split('_')[0])
                    if person_id == -1:  # junk / distractor image
                        continue
                    person_id_container.add(person_id)

        # Map raw person ids to contiguous 0-based labels.
        self.person_id2label = {pid: label for label, pid in enumerate(sorted(person_id_container))}

        # Second pass: build sample lists using the global mapping.
        if self.mode == 'test':
            self.query_data = self.get_data(query_dir)
            self.gallery_data = self.get_data(test_dir)
            self.data = self.query_data + self.gallery_data
        else:
            self.train_data = self.get_data(train_dir)

        self.color_space = color_space
        self.pre_process = pre_process

    def get_data(self, dir):
        """Scan ``dir`` and return a list of sample dicts.

        Each dict holds: image_path, person_id (remapped label),
        camera_id, video_id (sequence id) and original_pid.
        """
        data = []
        for file in os.listdir(dir):
            if not file.endswith(('.png', '.jpg', '.jpeg')):
                continue
            features = file.split('_')
            person_id = int(features[0])
            if person_id == -1:
                continue
            assert len(features[1]) == 4  # expect 'c<cam>s<seq>'
            camera_id = int(features[1][1])  # 'c1s3' -> 1
            video_id = int(features[1][3])   # 'c1s3' -> 3

            assert 0 <= person_id <= 1501
            assert 1 <= camera_id <= 6

            data.append({
                'image_path': os.path.join(dir, file),
                'person_id': self.person_id2label[person_id],
                'camera_id': camera_id,
                'video_id': video_id,
                'original_pid': person_id
            })
        return data

    def __len__(self):
        # Bug fix: the previous version returned
        # (len(query)+len(gallery))//2 in test mode, i.e. only half of
        # self.data, so a DataLoader silently skipped half the
        # evaluation images.
        if self.mode == 'test':
            return len(self.data)
        return len(self.train_data)

    def __getitem__(self, index):
        sample = self.data[index] if self.mode == 'test' else self.train_data[index]
        image_path = sample['image_path']

        image = Image.open(image_path).convert(self.color_space)
        if self.pre_process is not None:
            image = self.pre_process(image)
        # os.path.basename is portable, unlike split('/')[-1].
        filename = os.path.basename(image_path)

        return image, sample['person_id'], sample['camera_id'], sample['video_id'], filename




class DukeMTMC(Dataset):
    """DukeMTMC-reID person re-identification dataset.

    Expected layout under ``data_dir``:
        query/               -- query images
        bounding_box_test/   -- gallery images
        bounding_box_train/  -- training images

    File names look like ``0005_c2_f0046182.jpg``: raw person id, then a
    camera token ``c<cam>``. Images with person id ``-1`` are skipped.
    The id-to-label mapping is built over all three splits.

    Args:
        pre_process: optional callable applied to each PIL image.
        data_dir: dataset root directory.
        extra_annotations: unused, kept for interface compatibility.
        mode: 'test' (query + gallery) or 'train'.
        color_space: PIL conversion mode, e.g. 'RGB'.
    """
    def __init__(self, pre_process=None,
                      data_dir=None,
                      extra_annotations=None,
                      mode='test',
                      color_space='RGB'
                    ):
        super().__init__()

        assert mode in ['test', 'train']
        self.mode = mode
        query_dir = os.path.join(data_dir, 'query')
        test_dir = os.path.join(data_dir, 'bounding_box_test')
        train_dir = os.path.join(data_dir, 'bounding_box_train')

        # Collect ids from all three splits so the label mapping is global.
        person_id_container = set()
        for directory in [query_dir, test_dir, train_dir]:
            for file in os.listdir(directory):
                if file.endswith(('.png', '.jpg', '.jpeg')):
                    person_id = int(file.split('_')[0])
                    if person_id == -1:  # junk / distractor image
                        continue
                    person_id_container.add(person_id)

        # Map raw person ids to contiguous 0-based labels.
        self.person_id2label = {pid: label for label, pid in enumerate(sorted(person_id_container))}

        self.query_data = self.get_data(query_dir)
        self.gallery_data = self.get_data(test_dir)
        self.train_data = self.get_data(train_dir)

        self.data = self.query_data + self.gallery_data
        self.color_space = color_space
        self.pre_process = pre_process

    def get_data(self, dir):
        """Scan ``dir`` and return a list of sample dicts.

        Each dict holds: image_path, person_id (remapped label),
        camera_id, video_id (always 0 for Duke) and original_pid.
        """
        data = []
        for file in os.listdir(dir):
            if not file.endswith(('.png', '.jpg', '.jpeg')):
                continue
            features = file.split('_')
            person_id = int(features[0])
            if person_id == -1:
                continue
            camera_id = int(features[1].split("s")[0][1:])  # 'c2' -> 2
            video_id = 0  # Duke filenames carry no sequence id

            data.append({
                'image_path': os.path.join(dir, file),
                'person_id': self.person_id2label[person_id],
                'camera_id': camera_id,
                'video_id': video_id,
                'original_pid': person_id
            })
        return data

    def __len__(self):
        if self.mode == 'test':
            return len(self.query_data) + len(self.gallery_data)
        return len(self.train_data)

    def __getitem__(self, index):
        if self.mode == 'test':
            data = self.data[index]
        elif self.mode == 'train':
            data = self.train_data[index]
        else:
            raise ValueError()

        image_path = data['image_path']
        image = Image.open(image_path).convert(self.color_space)
        # Bug fix: pre_process defaults to None but was called
        # unconditionally, crashing with the default arguments.
        if self.pre_process is not None:
            image = self.pre_process(image)
        # os.path.basename is portable, unlike split('/')[-1].
        filename = os.path.basename(image_path)

        return image, data['person_id'], data['camera_id'], data['video_id'], filename
            



class MSMT(Dataset):
    """MSMT17 person re-identification dataset.

    Expected layout under ``data_dir``:
        query/               -- query images
        bounding_box_test/   -- gallery images
        bounding_box_train/  -- training images

    Images with person id ``-1`` are skipped. The id-to-label mapping
    is built over all three splits.

    Args:
        pre_process: optional callable applied to each PIL image.
        data_dir: dataset root directory.
        extra_annotatinos: unused; typo preserved so existing keyword
            callers keep working.
        mode: 'test' (query + gallery) or 'train'.
        color_space: PIL conversion mode, e.g. 'RGB'.
    """
    def __init__(self, pre_process=None, data_dir=None, extra_annotatinos=None, mode='test', color_space="RGB"):
        super().__init__()

        assert mode in ['test', 'train']
        self.mode = mode
        query_dir = os.path.join(data_dir, 'query')
        test_dir = os.path.join(data_dir, 'bounding_box_test')
        train_dir = os.path.join(data_dir, 'bounding_box_train')

        # Collect ids from all three splits so the label mapping is global.
        person_id_container = set()
        for directory in [query_dir, test_dir, train_dir]:
            for file in os.listdir(directory):
                if file.endswith(('.png', '.jpg', '.jpeg')):
                    person_id = int(file.split('_')[0])
                    if person_id == -1:  # junk / distractor image
                        continue
                    person_id_container.add(person_id)

        # Map raw person ids to contiguous 0-based labels.
        self.person_id2label = {pid: label for label, pid in enumerate(sorted(person_id_container))}

        self.query_data = self.get_data(query_dir)
        self.gallery_data = self.get_data(test_dir)
        self.train_data = self.get_data(train_dir)

        self.data = self.query_data + self.gallery_data
        self.color_space = color_space
        self.pre_process = pre_process

    def get_data(self, dir):
        """Scan ``dir`` and return a list of sample dicts.

        Each dict holds: image_path, person_id (remapped label),
        camera_id, video_id (always 0 here) and original_pid.
        """
        data = []
        for file in os.listdir(dir):
            if not file.endswith(('.png', '.jpg', '.jpeg')):
                continue
            features = file.split('_')
            person_id = int(features[0])
            if person_id == -1:
                continue
            camera_id = int(features[1].split('s')[0][1:])  # 'c5s2' -> 5
            video_id = 0  # sequence id is not used for MSMT

            data.append({
                'image_path': os.path.join(dir, file),
                'person_id': self.person_id2label[person_id],
                'camera_id': camera_id,
                'video_id': video_id,
                'original_pid': person_id
            })
        return data

    def __len__(self):
        if self.mode == 'test':
            return len(self.query_data) + len(self.gallery_data)
        return len(self.train_data)

    def __getitem__(self, index):
        if self.mode == 'test':
            data = self.data[index]
        elif self.mode == 'train':
            # Bug fix: previously indexed self.data (query + gallery)
            # here, so training iterated the wrong split while __len__
            # reported the training-set size.
            data = self.train_data[index]
        else:
            raise ValueError()

        image_path = data['image_path']
        image = Image.open(image_path).convert(self.color_space)
        # Bug fix: pre_process defaults to None but was called
        # unconditionally, crashing with the default arguments.
        if self.pre_process is not None:
            image = self.pre_process(image)
        # os.path.basename is portable, unlike split('/')[-1].
        filename = os.path.basename(image_path)

        return image, data['person_id'], data['camera_id'], data['video_id'], filename




def collate_fn(batch):
    """Collate (image, person_id, camera_id, video_id, filename) samples.

    Images are stacked into a single tensor along a new batch dimension,
    the three id fields become 1-D tensors, and filenames are returned
    as a plain list of strings.
    """
    images, person_ids, camera_ids, video_ids, filenames = zip(*batch)
    return (
        torch.stack(images, dim=0),
        torch.tensor(person_ids),
        torch.tensor(camera_ids),
        torch.tensor(video_ids),
        list(filenames),
    )






if __name__ == "__main__":
    # Smoke test: iterate the Market-1501 test split and print the
    # shape of each batched image tensor.
    pre_process = Compose([
        Resize((256, 128)),
        ToTensor(),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225)),
    ])

    dataset = Market1501(
        pre_process=pre_process,
        data_dir="/Users/luotianhang/Documents/dataset/reid/Market-1501-v15.09.15",
        extra_annotations=None,
        mode='test'
    )
    loader = DataLoader(dataset, 1)

    for batch in loader:
        image = batch[0].cpu().numpy()
        print(image.shape)