import os.path as osp
import pdb

import numpy as np
from scipy.io import loadmat

from .base import BaseDataset
from PIL import Image
import torch
ins_min,ins_max = 64,190  # crop target size (width=64, height=190) for the resized person patches in CUHKSYSU_two.__getitem__
class CUHKSYSU(BaseDataset):
    """CUHK-SYSU person-search dataset.

    Parses the official MATLAB annotation files (via ``scipy.io.loadmat``)
    under ``<root>/annotation`` and yields one record per image:
    ``{"img_name", "img_path", "boxes", "pids"}`` with boxes in
    (x1, y1, x2, y2) pixel coordinates.
    """

    def __init__(self, root, transforms, split):
        # root: dataset root directory holding "Image/SSM" and "annotation".
        # split: "train", "gallery" or "query" (dispatched in _load_annotations).
        self.name = "CUHK-SYSU"
        self.img_prefix = osp.join(root, "Image", "SSM")
        super(CUHKSYSU, self).__init__(root, transforms, split)

    def _load_queries(self):
        """Load the query annotations of the TestG50 protocol.

        Returns a list of single-box records; the pid is a dummy (-100)
        because a query is identified by its box, not by a label.
        """
        # TestG50: a test protocol, 50 gallery images per query
        protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
        protoc = protoc["TestG50"].squeeze()
        queries = []
        for item in protoc["Query"]:
            # The repeated [0, 0][0] indexing unwraps the nested MATLAB
            # struct/cell layers produced by loadmat.
            img_name = str(item["imname"][0, 0][0])
            roi = item["idlocate"][0, 0][0].astype(np.int32)
            roi[2:] += roi[:2]  # (x, y, w, h) -> (x1, y1, x2, y2)
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],  # shape (1, 4)
                    "pids": np.array([-100]),  # dummy pid
                }
            )
        return queries

    def _load_split_img_names(self):
        """
        Load the image names for the specific split.

        "gallery" returns the official pool; "train" returns all images
        minus the gallery pool. ("query" never reaches this method — it is
        handled by _load_queries.)
        """
        assert self.split in ("train", "gallery")
        # gallery images
        gallery_imgs = loadmat(osp.join(self.root, "annotation", "pool.mat"))
        gallery_imgs = gallery_imgs["pool"].squeeze()
        gallery_imgs = [str(a[0]) for a in gallery_imgs]
        if self.split == "gallery":
            return gallery_imgs
        # all images
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        all_imgs = [str(a[0][0]) for a in all_imgs]
        # training images = all images - gallery images
        training_imgs = sorted(list(set(all_imgs) - set(gallery_imgs)))
        return training_imgs

    def _load_annotations(self):
        """Build the annotation list for the current split.

        Every box starts with the sentinel pid 5555 (unlabeled); labeled
        boxes are then overwritten with pids 1..N by matching exact box
        coordinates against the protocol files.
        """
        if self.split == "query":
            return self._load_queries()

        # load all images and build a dict from image to boxes
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        name_to_boxes = {}
        
        name_to_pids = {}
        unlabeled_pid = 5555  # default pid for unlabeled people
        for img_name, _, boxes in all_imgs:
            img_name = str(img_name[0])
            boxes = np.asarray([b[0] for b in boxes[0]])
            boxes = boxes.reshape(boxes.shape[0], 4)  # (x1, y1, w, h)
            # Drop degenerate boxes with non-positive width or height.
            valid_index = np.where((boxes[:, 2] > 0) & (boxes[:, 3] > 0))[0]
            assert valid_index.size > 0, "Warning: {} has no valid boxes.".format(img_name)
            boxes = boxes[valid_index]
            name_to_boxes[img_name] = boxes.astype(np.int32)
            name_to_pids[img_name] = unlabeled_pid * np.ones(boxes.shape[0], dtype=np.int32)

        def set_box_pid(boxes, box, pids, pid):
            # Assign `pid` to the entry of `boxes` that exactly matches `box`
            # (coordinate-wise); silently does nothing if no exact match.
            for i in range(boxes.shape[0]):
                if np.all(boxes[i] == box):
                    pids[i] = pid
                    return

        # assign a unique pid from 1 to N for each identity
        if self.split == "train":
            train = loadmat(osp.join(self.root, "annotation/test/train_test/Train.mat"))
            train = train["Train"].squeeze()
            for index, item in enumerate(train):
                scenes = item[0, 0][2].squeeze()
                for img_name, box, _ in scenes:
                    img_name = str(img_name[0])
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[img_name], box, name_to_pids[img_name], index + 1)
        else:
            protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
            protoc = protoc["TestG50"].squeeze()
            for index, item in enumerate(protoc):
                # query
                im_name = str(item["Query"][0, 0][0][0])
                box = item["Query"][0, 0][1].squeeze().astype(np.int32)
                set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)
                # gallery
                gallery = item["Gallery"].squeeze()
                for im_name, box, _ in gallery:
                    im_name = str(im_name[0])
                    if box.size == 0:
                        # NOTE(review): an empty box appears to mark the end of
                        # this query's annotated gallery entries, hence break
                        # rather than continue — confirm against the data.
                        break
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)

        annotations = []
        imgs = self._load_split_img_names()
        for img_name in imgs:
            boxes = name_to_boxes[img_name]
            # NOTE: mutates the arrays stored in name_to_boxes in place.
            boxes[:, 2:] += boxes[:, :2]  # (x1, y1, w, h) -> (x1, y1, x2, y2)
            pids = name_to_pids[img_name]
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": boxes,
                    "pids": pids,
                }
            )
        return annotations



class CUHKSYSU_two(BaseDataset):
    """CUHK-SYSU dataset that additionally returns cropped person patches.

    Identical to the plain CUHK-SYSU loader except that ``__getitem__``
    crops every annotated box out of the image, resizes the crops to
    ``(ins_min, ins_max)`` and attaches them to the target dict under
    ``"img_crops"``.
    """

    def __init__(self, root, transforms, split):
        # root: dataset root directory holding "Image/SSM" and "annotation".
        # split: "train", "gallery" or "query" (dispatched in _load_annotations).
        self.name = "CUHK-SYSU"
        self.img_prefix = osp.join(root, "Image", "SSM")
        super(CUHKSYSU_two, self).__init__(root, transforms, split)

    def _load_queries(self):
        """Load the query annotations of the TestG50 protocol.

        Returns a list of single-box records; the pid is a dummy (-100)
        because a query is identified by its box, not by a label.
        """
        # TestG50: a test protocol, 50 gallery images per query
        protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
        protoc = protoc["TestG50"].squeeze()
        queries = []
        for item in protoc["Query"]:
            # The repeated [0, 0][0] indexing unwraps the nested MATLAB
            # struct/cell layers produced by loadmat.
            img_name = str(item["imname"][0, 0][0])
            roi = item["idlocate"][0, 0][0].astype(np.int32)
            roi[2:] += roi[:2]  # (x, y, w, h) -> (x1, y1, x2, y2)
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],  # shape (1, 4)
                    "pids": np.array([-100]),  # dummy pid
                }
            )
        return queries

    def _load_split_img_names(self):
        """
        Load the image names for the specific split.

        "gallery" returns the official pool; "train" returns all images
        minus the gallery pool.
        """
        assert self.split in ("train", "gallery")
        # gallery images
        gallery_imgs = loadmat(osp.join(self.root, "annotation", "pool.mat"))
        gallery_imgs = gallery_imgs["pool"].squeeze()
        gallery_imgs = [str(a[0]) for a in gallery_imgs]
        if self.split == "gallery":
            return gallery_imgs
        # all images
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        all_imgs = [str(a[0][0]) for a in all_imgs]
        # training images = all images - gallery images
        training_imgs = sorted(list(set(all_imgs) - set(gallery_imgs)))
        return training_imgs

    def _load_annotations(self):
        """Build the annotation list for the current split.

        Every box starts with the sentinel pid 5555 (unlabeled); labeled
        boxes are then overwritten with pids 1..N by matching exact box
        coordinates against the protocol files.
        """
        if self.split == "query":
            return self._load_queries()

        # load all images and build a dict from image to boxes
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        name_to_boxes = {}

        name_to_pids = {}
        unlabeled_pid = 5555  # default pid for unlabeled people
        for img_name, _, boxes in all_imgs:
            img_name = str(img_name[0])
            boxes = np.asarray([b[0] for b in boxes[0]])
            boxes = boxes.reshape(boxes.shape[0], 4)  # (x1, y1, w, h)
            # Drop degenerate boxes with non-positive width or height.
            valid_index = np.where((boxes[:, 2] > 0) & (boxes[:, 3] > 0))[0]
            assert valid_index.size > 0, "Warning: {} has no valid boxes.".format(img_name)
            boxes = boxes[valid_index]
            name_to_boxes[img_name] = boxes.astype(np.int32)
            name_to_pids[img_name] = unlabeled_pid * np.ones(boxes.shape[0], dtype=np.int32)

        def set_box_pid(boxes, box, pids, pid):
            # Assign `pid` to the entry of `boxes` that exactly matches `box`;
            # silently does nothing if no exact match exists.
            for i in range(boxes.shape[0]):
                if np.all(boxes[i] == box):
                    pids[i] = pid
                    return

        # assign a unique pid from 1 to N for each identity
        if self.split == "train":
            train = loadmat(osp.join(self.root, "annotation/test/train_test/Train.mat"))
            train = train["Train"].squeeze()
            for index, item in enumerate(train):
                scenes = item[0, 0][2].squeeze()
                for img_name, box, _ in scenes:
                    img_name = str(img_name[0])
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[img_name], box, name_to_pids[img_name], index + 1)
        else:
            protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
            protoc = protoc["TestG50"].squeeze()
            for index, item in enumerate(protoc):
                # query
                im_name = str(item["Query"][0, 0][0][0])
                box = item["Query"][0, 0][1].squeeze().astype(np.int32)
                set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)
                # gallery
                gallery = item["Gallery"].squeeze()
                for im_name, box, _ in gallery:
                    im_name = str(im_name[0])
                    if box.size == 0:
                        # NOTE(review): an empty box appears to mark the end of
                        # this query's annotated gallery entries — hence break.
                        break
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)

        annotations = []
        imgs = self._load_split_img_names()
        for img_name in imgs:
            boxes = name_to_boxes[img_name]
            boxes[:, 2:] += boxes[:, :2]  # (x1, y1, w, h) -> (x1, y1, x2, y2)
            pids = name_to_pids[img_name]
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": boxes,
                    "pids": pids,
                }
            )
        return annotations

    def __getitem__(self, index):
        """Return ``(img, target)`` with per-box crops added to the target.

        2021-12-25 (Zhang Jianhe): extended the output with cropped box
        images under the ``"img_crops"`` key.
        """
        anno = self.annotations[index]
        img = Image.open(anno["img_path"]).convert("RGB")

        # Crop every annotated person and resize to a fixed (width, height).
        img_crops = [img.crop(box).resize((ins_min, ins_max)) for box in anno['boxes']]

        boxes = torch.as_tensor(anno["boxes"], dtype=torch.float32)
        # Keep the native dtype of anno["pids"] (multi-label annotations).
        # The original hard-coded `use_multi_label = True` flag and its
        # unreachable int64 branch were dead code and have been removed.
        labels = torch.as_tensor(anno["pids"])

        target = {
            "img_name": anno["img_name"],
            "boxes": boxes,
            "labels": labels,
            'img_crops': img_crops,
        }
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

class CUHKSYSU_trainset_yolobox(BaseDataset):
    """CUHK-SYSU training set whose boxes come from YOLO detections.

    ``yolodet_boxes`` has the layout
    ``{'person': [[x1, y1, x2, y2, score, img_path], ...]}``; every
    detection is kept and assigned the unlabeled pid -1.
    """

    def __init__(self, yolodet_boxes, root, transforms, split):
        # yolodet_boxes: detector output, see class docstring.
        self.name = "CUHK-SYSU"
        self.yolodet_boxes = yolodet_boxes
        self.img_prefix = osp.join(root, "Image", "SSM")
        super(CUHKSYSU_trainset_yolobox, self).__init__(root, transforms, split)

    def _load_queries(self):
        """Load the query annotations of the TestG50 protocol.

        Returns a list of single-box records; the pid is a dummy (-100)
        because a query is identified by its box, not by a label.
        """
        # TestG50: a test protocol, 50 gallery images per query
        protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
        protoc = protoc["TestG50"].squeeze()
        queries = []
        for item in protoc["Query"]:
            img_name = str(item["imname"][0, 0][0])
            roi = item["idlocate"][0, 0][0].astype(np.int32)
            roi[2:] += roi[:2]  # (x, y, w, h) -> (x1, y1, x2, y2)
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],  # shape (1, 4)
                    "pids": np.array([-100]),  # dummy pid
                }
            )
        return queries

    def _load_split_img_names(self):
        """
        Load the image names for the specific split.

        "gallery" returns the official pool; "train" returns all images
        minus the gallery pool.
        """
        assert self.split in ("train", "gallery")
        # gallery images
        gallery_imgs = loadmat(osp.join(self.root, "annotation", "pool.mat"))
        gallery_imgs = gallery_imgs["pool"].squeeze()
        gallery_imgs = [str(a[0]) for a in gallery_imgs]
        if self.split == "gallery":
            return gallery_imgs
        # all images
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        all_imgs = [str(a[0][0]) for a in all_imgs]
        # training images = all images - gallery images
        training_imgs = sorted(list(set(all_imgs) - set(gallery_imgs)))
        return training_imgs

    def _load_annotations(self):
        """Group the YOLO detections by image into annotation records.

        Fix over the original: detections are grouped in a single pass with
        ``dict.setdefault`` instead of two identical passes (the first of
        which only initialized empty lists). Insertion order — and hence
        the resulting annotation order — is unchanged.
        """
        if self.split == "query":
            return self._load_queries()

        img_has_boxes = {}  # {img_path: [box, ...]} in first-seen order
        for box_info in self.yolodet_boxes['person']:
            box, img_path = box_info[:4], box_info[-1]
            img_has_boxes.setdefault(img_path, []).append(box)

        annotations = []
        for img_p, boxes in img_has_boxes.items():
            img_name = img_p.split('/')[-1]
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": boxes,
                    "pids": [-1] * len(boxes),  # all detections are unlabeled
                }
            )
        return annotations

import cv2
from utils.utils import unpickle,pickle
from collections import Counter

class CUHKSYSU_cluster1(BaseDataset):
    """Experimental CUHK-SYSU variant fed by pickled clustering results.

    Query/gallery handling matches the plain CUHK-SYSU loader; the other
    splits load pre-computed pseudo-label annotations from pickle files,
    selected by hard-coded flags inside ``_load_annotations`` (only the
    ``use_multi_label`` path is currently active).
    """

    def __init__(self, root, transforms, split):
        # root: dataset root directory holding "Image/SSM" and "annotation".
        # split: "train", "gallery" or "query" (dispatched in _load_annotations).
        self.name = "ssm_duke"
        self.img_prefix = osp.join(root, "Image", "SSM")
        super(CUHKSYSU_cluster1, self).__init__(root, transforms, split)

    def _load_queries(self):
        """Load the query annotations of the TestG50 protocol.

        Returns a list of single-box records; the pid is a dummy (-100)
        because a query is identified by its box, not by a label.
        """
        # TestG50: a test protocol, 50 gallery images per query
        protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
        protoc = protoc["TestG50"].squeeze()
        queries = []
        for item in protoc["Query"]:
            img_name = str(item["imname"][0, 0][0])
            roi = item["idlocate"][0, 0][0].astype(np.int32)
            roi[2:] += roi[:2]  # (x, y, w, h) -> (x1, y1, x2, y2)
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],  # shape (1, 4)
                    "pids": np.array([-100]),  # dummy pid
                }
            )
        return queries

    def _load_split_img_names(self):
        """
        Load the image names for the specific split.

        "gallery" returns the official pool; "train" returns all images
        minus the gallery pool.
        """
        assert self.split in ("train", "gallery")
        # gallery images
        gallery_imgs = loadmat(osp.join(self.root, "annotation", "pool.mat"))
        gallery_imgs = gallery_imgs["pool"].squeeze()
        gallery_imgs = [str(a[0]) for a in gallery_imgs]
        if self.split == "gallery":
            return gallery_imgs
        # all images
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        all_imgs = [str(a[0][0]) for a in all_imgs]
        # training images = all images - gallery images
        training_imgs = sorted(list(set(all_imgs) - set(gallery_imgs)))
        return training_imgs

    def _load_annotations(self):
        """Load annotations for the current split.

        For non-query splits the annotations come from one of several
        pickled pseudo-label sets chosen by the hard-coded flags below.
        With the current values only the ``use_multi_label`` branch runs.
        """
        if self.split == "query":
            return self._load_queries()

        # load all images and build a dict from image to boxes
        # Hard-coded experiment switches; only use_multi_label is active.
        use_3859_clu=False
        use_2635_clu=False
        use_multi_label=True
        if use_3859_clu==True:  # dead code with the flags above
            annotations=unpickle('ssm_cluster1_duke/ssm_duke_anno.pkl')
            use_duke_pid_only=False
            use_yolocluster_only=True
            numpids=[]
            if use_duke_pid_only==True:
                for i,ann_dict in enumerate(annotations):
                    # NOTE(review): hard-coded machine-specific image root.
                    root='/home/cv7609/zjh/ps_raw/ssm_cluster1_with_duke/Image'
                    img_name=ann_dict['img_name']
                    img_path=osp.join(root,img_name)
                    pids=ann_dict['pids']
                    boxes=ann_dict['boxes']
                    new_pids=[]
                    new_boxes=[]
                    for pid,box in zip(pids,boxes):
                        if pid>0 and pid<3860: # exclude yolo-cluster labels (ids 1..3859)
                            continue
                        else:
                            # Re-base duke pids by -3859; keep the 5555 sentinel.
                            new_pids.append(pid-3859 if pid!=5555 else 5555)
                            numpids.append(pid-3859 if pid!=5555 else 5555)
                            new_boxes.append(box)
                    # print(new_pids)
                    annotations[i]['pids']=new_pids
                    annotations[i]['img_path']=img_path
                    annotations[i]['boxes']=new_boxes
                    assert len(new_boxes)==len(new_pids)
                # Debug summary of the kept pid set.
                pset=sorted(list(set(numpids)))
                print(pset)
                print(pset[0],pset[-1])
                print(len(numpids))
                print(len(pset))
            if use_yolocluster_only==True:
                newannotations=[]
                for i,ann_dict in enumerate(annotations):
                    root='/home/cv7609/zjh/ps_raw/ssm_cluster1_with_duke/Image'
                    img_name=ann_dict['img_name']
                    img_path=osp.join(root,img_name)
                    pids=ann_dict['pids']
                    boxes=ann_dict['boxes']
                    new_pids=[]
                    new_boxes=[]
                    for pid,box in zip(pids,boxes):
                        if pid>0 and pid<3860 or pid==5555: # keep only yolo-cluster labels (plus the 5555 sentinel)

                            new_pids.append(pid)
                            numpids.append(pid)
                            new_boxes.append(box)
                        else:
                            continue
                    # print(new_pids)
                    # Keep only images that still have at least one box.
                    if len(new_pids)>0:
                        anno={
                            'img_name':img_name,
                            'img_path':img_path,
                            'boxes':new_boxes,
                            'pids':new_pids
                        }
                        newannotations.append(anno)
                    assert len(new_boxes)==len(new_pids)
                # Debug summary of the kept pid set.
                pset=sorted(list(set(numpids)))
                # print(pset)
                print(pset[0],pset[-1])
                print(len(numpids))
                print(len(pset))
                annotations=newannotations
            else:    # keep both yolo-cluster and duke pids; only fix img_path (unreachable with use_yolocluster_only=True)
                for i,ann_dict in enumerate(annotations):
                    img_name=ann_dict['img_name']
                    root='/home/cv7609/zjh/ps_raw/ssm_cluster1_with_duke/Image'
                    img_path=osp.join(root,img_name)
                    annotations[i]['img_path']=img_path
        if use_2635_clu==True:  # dead code with the flags above
            annotations=unpickle('../yolo_feats/anno10535-2635+1.pkl')
            unio_prw=True # union with PRW ids: offset labelled pids (code adds 483; original note said +482 — verify)
            if unio_prw:
                for i in range(len(annotations)):
                    for j,id in enumerate(annotations[i]['pids']):
                        if id!=5555:  # leave the unlabeled sentinel untouched
                            annotations[i]['pids'][j]=id+483
                        # pdb.set_trace()
        if use_multi_label==True:
            # Active path: multi-label pseudo annotations; only img_path needs
            # re-rooting (NOTE(review): hard-coded machine-specific path).
            annotations = unpickle('../yolo_feats/anno_multi_label_5_0.5.pkl')
            for i, ann_dict in enumerate(annotations):
                root = '/home/cv7609/zjh/ps_raw/ssm_cluster1_with_duke/Image'
                img_name = ann_dict['img_name']
                img_path = osp.join(root, img_name)
                annotations[i]['img_path']=img_path
        return annotations
