import os.path as osp
import re

import numpy as np
from scipy.io import loadmat

from .base import BaseDataset
from PIL import Image
import torch
# Target (width, height) in pixels for the per-person crops produced in
# PRW_two.__getitem__ via img.crop(box).resize((ins_min, ins_max)).
ins_min,ins_max = 64,190

class PRW(BaseDataset):
    """PRW person-search dataset: frames + per-frame ``.mat`` box annotations.

    Splits:
        - ``"query"``   -> parsed from ``query_info.txt``
        - ``"train"``   -> frame list from ``frame_train.mat``; labeled pids
                           are relabeled to a contiguous range starting at 1
        - ``"gallery"`` -> frame list from ``frame_test.mat``
    """

    def __init__(self, root, transforms, split):
        self.name = "PRW"
        self.img_prefix = osp.join(root, "frames")
        super(PRW, self).__init__(root, transforms, split)

    def _get_cam_id(self, img_name):
        """Return the integer camera id parsed from a frame name like ``c1s1_...``."""
        match = re.search(r"c\d", img_name).group().replace("c", "")
        return int(match)

    def _load_queries(self):
        """Parse ``query_info.txt`` into a list of per-query annotation dicts.

        Each line has the form ``pid x y w h frame_name``.  Boxes are
        converted from (x, y, w, h) to (x1, y1, x2, y2) and clipped to be
        non-negative.
        """
        query_info = osp.join(self.root, "query_info.txt")
        with open(query_info, "rb") as f:
            raw = f.readlines()

        queries = []
        for line in raw:
            fields = str(line, "utf-8").split(" ")
            pid = int(fields[0])
            x, y, w, h = map(float, fields[1:5])
            roi = np.array([x, y, x + w, y + h]).astype(np.int32)
            roi = np.clip(roi, 0, None)  # several coordinates are negative
            # Strip the trailing newline robustly: handles both "\n" and
            # "\r\n" (the previous fixed [:-2] slice assumed CRLF endings).
            img_name = fields[5].rstrip() + ".jpg"
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],
                    "pids": np.array([pid]),
                    "cam_id": self._get_cam_id(img_name),
                }
            )
        return queries

    def _load_split_img_names(self):
        """Load the frame names (with ``.jpg`` suffix) for the current split."""
        assert self.split in ("train", "gallery")
        if self.split == "train":
            imgs = loadmat(osp.join(self.root, "frame_train.mat"))["img_index_train"]
        else:
            imgs = loadmat(osp.join(self.root, "frame_test.mat"))["img_index_test"]
        return [img[0][0] + ".jpg" for img in imgs]

    def _load_annotations(self):
        """Build the annotation dicts for the current split.

        For train/gallery, boxes come from one ``.mat`` file per frame.
        Pid ``-2`` (unlabeled people) is mapped to the sentinel ``5555``.
        For the train split, all labeled pids are then relabeled to
        ``1..K`` in sorted order, while ``5555`` is kept as-is.
        """
        if self.split == "query":
            return self._load_queries()

        annotations = []
        for img_name in self._load_split_img_names():
            anno = loadmat(osp.join(self.root, "annotations", img_name))
            # The key holding the boxes differs between frames; probe the
            # known candidates in order of preference.
            for box_key in ("box_new", "anno_file", "anno_previous"):
                if box_key in anno:
                    break

            rois = anno[box_key][:, 1:]
            ids = anno[box_key][:, 0]
            rois = np.clip(rois, 0, None)  # several coordinates are negative

            assert len(rois) == len(ids)

            rois[:, 2:] += rois[:, :2]  # (x, y, w, h) -> (x1, y1, x2, y2)
            ids[ids == -2] = 5555  # assign pid = 5555 for unlabeled people
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": rois.astype(np.int32),
                    # FIXME: (training pids) 1, 2,..., 478, 480, 481, 482, 483, 932, 5555
                    "pids": ids.astype(np.int32),
                    "cam_id": self._get_cam_id(img_name),
                }
            )

        labeled_count = 0
        if self.split == "train":
            # Collect every pid (the 5555 sentinel included) and map the
            # sorted values onto 1..K; 5555 itself is never remapped below.
            pid_set = set()
            for ann in annotations:
                pid_set.update(ann["pids"])
            relabel = {old: new + 1 for new, old in enumerate(sorted(pid_set))}

            for ann in annotations:
                new_pids = []
                for pid in ann["pids"]:
                    if pid == 5555:
                        new_pids.append(pid)  # keep the unlabeled sentinel
                    else:
                        labeled_count += 1
                        new_pids.append(relabel[pid])
                # NOTE: relabeled pids are stored as a plain list (not an
                # ndarray), matching the original behavior.
                ann["pids"] = new_pids
        print('with id nums', labeled_count)
        return annotations

class PRW_two(PRW):
    """PRW variant whose samples additionally carry per-box image crops.

    Previously this class was a full copy of :class:`PRW`; all loading
    logic (queries, split lists, annotations, relabeling) is now inherited
    and only ``__getitem__`` is overridden.

    History: Zhang Jianhe, 2021-12-25 — changed the returned sample to
    also include cropped box images.
    """

    def __getitem__(self, index):
        """Return ``(img, target)`` where ``target`` also holds person crops.

        ``target`` keys: ``img_name``, ``boxes`` (float32 tensor, xyxy),
        ``labels`` (tensor built from the annotation pids), and
        ``img_crops`` (list of PIL images, one per box, resized to the
        module-level ``(ins_min, ins_max)`` width/height).
        """
        anno = self.annotations[index]
        img = Image.open(anno["img_path"]).convert("RGB")

        # Crop every annotated box and resize to a fixed (width, height).
        img_crops = [
            img.crop(box).resize((ins_min, ins_max)) for box in anno["boxes"]
        ]

        boxes = torch.as_tensor(anno["boxes"], dtype=torch.float32)
        # The former `use_multi_label` toggle was hard-coded to True, so the
        # label dtype is always inferred from anno["pids"] (never forced to
        # int64); the dead branch has been removed.
        labels = torch.as_tensor(anno["pids"])

        target = {
            "img_name": anno["img_name"],
            "boxes": boxes,
            "labels": labels,
            'img_crops': img_crops,
        }
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target