from PIL import Image
import json
import numpy as np
import torch
import torch.utils.data
import datasets.transforms as T
import CLIP.clip as Clip

class BaseDataloader(torch.utils.data.Dataset):
    """Base dataset for HOI (human-object interaction) detection.

    Reads a COCO-style JSON annotation file and yields ``(img, target)``
    pairs carrying object-detection labels (``get_od``) and interaction
    labels (``get_hoi``).  Subclasses are expected to provide
    ``self._valid_verb_ids`` (left ``None`` here) and
    ``self.text_label_ids`` (not set here) before items are fetched.
    """

    def __init__(self, img_set, img_folder, anno_file, transforms, num_queries, dataset_file, clip_model='RN50'):
        """
        Args:
            img_set: split name; 'train' selects the training annotation
                format, anything else the evaluation format.
            img_folder: path-like directory joined with each file name
                (used with the ``/`` operator, so a ``pathlib.Path``).
            anno_file: path of the JSON annotation file.
            transforms: callable ``(img, target) -> (img, target)`` or None.
            num_queries: cap on object annotations kept per training image.
            dataset_file: dataset identifier string (e.g. 'hico').
            clip_model: CLIP backbone name; only the CLIP preprocessing
                transform is kept, the model itself is discarded.
        """
        super().__init__()
        self.dataset_file = dataset_file
        self.img_set = img_set
        self.img_folder = img_folder
        print(anno_file)  # debug: show which annotation file is being loaded
        with open(anno_file, 'r') as f:
            self.annotations = json.load(f)
        self._transforms = transforms

        self.num_queries = num_queries

        # The 80 valid COCO object category ids (the raw 1..90 range has gaps).
        self._valid_obj_ids = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
                               14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                               24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
                               37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
                               48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
                               58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
                               72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
                               82, 84, 85, 86, 87, 88, 89, 90)
        # Dataset-specific verb ids; must be set by the subclass.
        self._valid_verb_ids = None
        device = "cuda" if torch.cuda.is_available() else "cpu"
        _, self.clip_preprocess = Clip.load(clip_model, device)

    def __len__(self):
        """Number of images (one annotation entry per image)."""
        return len(self.annotations)

    def load_img(self, idx):
        """Return the raw annotation dict for image ``idx``."""
        return self.annotations[idx]

    def __getitem__(self, idx):
        """Return ``(img, target)`` for the sample at ``idx``."""
        img_anno = self.load_img(idx)

        img = Image.open(self.img_folder / img_anno['file_name']).convert('RGB')

        # Cap the object annotations at the detector's query count.
        # NOTE: this mutates the cached annotation entry in place, but the
        # truncation is idempotent across epochs.
        if self.img_set == 'train' and len(img_anno['annotations']) > self.num_queries:
            img_anno['annotations'] = img_anno['annotations'][:self.num_queries]

        target = {}
        target['filename'] = img_anno['file_name']
        target['id'] = idx
        # Numeric image id, e.g. 'HICO_train2015_00000001.jpg' -> 1.
        # The original used rstrip('.jpg'), which strips a *character set*
        # (any trailing '.', 'j', 'p', 'g'), not the suffix; splitting on
        # '.' is equivalent for well-formed names and safe in general.
        target['img_id'] = int(img_anno['file_name'].split('.')[0].split('_')[2])
        img, target = self.get_od(img, idx, img_anno, target)
        target = self.get_hoi(img_anno, target)
        return img, target

    def get_od(self, img, idx, img_anno, target):
        """Build object-detection labels for one image.

        Train: boxes are clamped to the image, degenerate boxes dropped,
        and the configured transforms applied (boxes become normalized).
        Eval: boxes/labels are stored untouched; only the image is
        transformed.  Returns the (possibly transformed) image and target.
        """
        w, h = img.size
        # All bounding boxes of the image, shape (k, 4).
        boxes = [obj['bbox'] for obj in img_anno['annotations']]
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)

        if self.img_set == 'train':
            # Pair each class with its original index so that get_hoi can
            # map interactions back to boxes that survive the transforms.
            classes = [(i, self._valid_obj_ids.index(obj['category_id']))
                       for i, obj in enumerate(img_anno['annotations'])]
            # reshape keeps the (k, 2) shape even when there are no
            # annotations (a bare tensor([]) would be 1-D and break the
            # later labels[:, 1] indexing).
            classes = torch.tensor(classes, dtype=torch.int64).reshape(-1, 2)
        else:
            classes = [self._valid_obj_ids.index(obj['category_id']) for obj in img_anno['annotations']]
            classes = torch.tensor(classes, dtype=torch.int64)

        target['orig_size'] = torch.as_tensor([int(h), int(w)])
        target['size'] = torch.as_tensor([int(h), int(w)])

        if self.img_set == 'train':
            # Clamp to the image and drop boxes with non-positive extent.
            boxes[:, 0::2].clamp_(min=0, max=w)
            boxes[:, 1::2].clamp_(min=0, max=h)
            keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
            boxes = boxes[keep]
            classes = classes[keep]
            target['boxes'] = boxes         # (k, 4)
            target['labels'] = classes      # (k, 2): (orig index, class)
            # explicit int64 dtype (a tensor built from an empty Python
            # list would default to float32)
            target['iscrowd'] = torch.zeros(boxes.shape[0], dtype=torch.int64)  # (k,)
            target['area'] = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])  # (k,)
            # Transforms convert integer pixel boxes to normalized coords.
            if self._transforms is not None:
                img, target = self._transforms(img, target)
        else:  # eval
            target['boxes'] = boxes         # (k, 4)
            target['labels'] = classes      # (k,)
            # Only the image is transformed; eval keeps original-coordinate
            # boxes (e.g. 480x640 -> 1066x800 image, boxes untouched).
            if self._transforms is not None:
                img, _ = self._transforms(img, None)
        return img, target

    def get_hoi(self, img_anno, target):
        """Attach interaction (HOI) labels to ``target``.

        Train: builds per-(subject, object)-pair tensors; multiple verbs
        on the same pair are merged into one multi-hot row.  Also emits
        de-duplicated ("uni_*") subject/object rows with inverse indices.
        Eval: stores the raw (subject, object, verb) triplets.
        """
        if self.img_set == 'train':
            # Original indices of the boxes that survived the transforms
            # (get_od stored (orig_index, class) pairs in 'labels').
            kept_box_indices = [label[0] for label in target['labels']]

            target['labels'] = target['labels'][:, 1]
            obj_labels, verb_labels, sub_boxes, obj_boxes = [], [], [], []

            sub_obj_pairs = []
            hoi_labels = []
            for hoi in img_anno['hoi_annotation']:  # one interaction
                # Skip interactions whose subject/object box was dropped.
                if hoi['subject_id'] not in kept_box_indices or \
                   (hoi['object_id'] != -1 and hoi['object_id'] not in kept_box_indices):
                    continue

                # (verb index, object class); class 80 means "no object".
                if hoi['object_id'] == -1:
                    verb_obj_pair = (self._valid_verb_ids.index(hoi['category_id']), 80)
                else:
                    verb_obj_pair = (self._valid_verb_ids.index(hoi['category_id']),
                                     target['labels'][kept_box_indices.index(hoi['object_id'])])

                # Keep only (verb, object) pairs in the text-label vocabulary.
                if verb_obj_pair not in self.text_label_ids:
                    continue

                sub_obj_pair = (hoi['subject_id'], hoi['object_id'])
                if sub_obj_pair in sub_obj_pairs:
                    # Pair already seen: merge this verb into its multi-hot
                    # rows.  Membership in text_label_ids was checked above,
                    # so these .index lookups cannot fail (the original
                    # wrapped the second one in a try/except that silently
                    # printed "error").
                    pair_idx = sub_obj_pairs.index(sub_obj_pair)
                    verb_labels[pair_idx][self._valid_verb_ids.index(hoi['category_id'])] = 1
                    hoi_labels[pair_idx][self.text_label_ids.index(verb_obj_pair)] = 1
                else:
                    sub_obj_pairs.append(sub_obj_pair)
                    # obj_labels: (kh,); 80 == "no object" sentinel class
                    if hoi['object_id'] == -1:
                        obj_labels.append(torch.tensor(len(self._valid_obj_ids)))
                    else:
                        obj_labels.append(target['labels'][kept_box_indices.index(hoi['object_id'])])

                    # verb_labels: (kh, num_verbs) multi-hot
                    verb_label = [0 for _ in range(len(self._valid_verb_ids))]
                    verb_label[self._valid_verb_ids.index(hoi['category_id'])] = 1
                    verb_labels.append(verb_label)

                    # hoi_labels: (kh, num_text_labels) multi-hot
                    hoi_label = [0] * len(self.text_label_ids)
                    hoi_label[self.text_label_ids.index(verb_obj_pair)] = 1

                    # sub_boxes: (kh, 4)
                    sub_box = target['boxes'][kept_box_indices.index(hoi['subject_id'])]
                    sub_boxes.append(sub_box)

                    # obj_boxes: for "no object", reuse the subject box
                    # (instead of an all-zero box).
                    if hoi['object_id'] == -1:
                        obj_box = sub_box.clone()
                    else:
                        obj_box = target['boxes'][kept_box_indices.index(hoi['object_id'])]
                    obj_boxes.append(obj_box)
                    hoi_labels.append(hoi_label)

            if len(sub_obj_pairs) == 0:
                # No interaction survived filtering: emit empty tensors so
                # downstream collation still works.
                target['sub_boxes'] = torch.zeros((0, 4), dtype=torch.float32)
                target['matching_labels'] = torch.zeros((0,), dtype=torch.int64)
                target['obj_boxes'] = torch.zeros((0, 4), dtype=torch.float32)
                target['obj_labels'] = torch.zeros((0,), dtype=torch.int64)
                target['verb_labels'] = torch.zeros((0, len(self._valid_verb_ids)), dtype=torch.float32)

                target['uni_sub_boxes'] = torch.zeros((0, 4), dtype=torch.float32)
                target['uni_obj_boxes'] = torch.zeros((0, 4), dtype=torch.float32)
                target['uni_obj_labels'] = torch.zeros((0,), dtype=torch.int64)
                target['uni_obj_no'] = torch.zeros((0,), dtype=torch.int64)
                target['uni_sub_no'] = torch.zeros((0,), dtype=torch.int64)

                target['hoi_labels'] = torch.zeros((0, len(self.text_label_ids)), dtype=torch.float32)
            else:
                obj_labels = torch.stack(obj_labels)
                verb_labels = torch.as_tensor(verb_labels, dtype=torch.float32)
                sub_boxes = torch.stack(sub_boxes)
                obj_boxes = torch.stack(obj_boxes)
                matching_labels = torch.ones_like(obj_labels, dtype=torch.int64)

                # De-duplicate subject boxes and (label, box) object rows;
                # the *_no inverse indices map each interaction back to its
                # unique row (np.unique accepts CPU tensors directly).
                uni_sub_boxes, uni_sub_no = np.unique(sub_boxes, return_inverse=True, axis=0)
                uni_obj_label_box, uni_obj_no = np.unique(
                    torch.cat([obj_labels.unsqueeze(-1), obj_boxes], -1), return_inverse=True, axis=0)

                uni_sub_boxes = torch.tensor(uni_sub_boxes)
                uni_obj_labels = torch.tensor(uni_obj_label_box[:, 0]).long()
                uni_obj_boxes = torch.tensor(uni_obj_label_box[:, 1:])
                uni_sub_no, uni_obj_no = torch.tensor(uni_sub_no).long(), torch.tensor(uni_obj_no).long()

                target['obj_labels'] = obj_labels
                target['obj_boxes'] = obj_boxes

                target['sub_boxes'] = sub_boxes
                target['matching_labels'] = matching_labels
                target['verb_labels'] = verb_labels

                target['uni_sub_boxes'] = uni_sub_boxes
                target['uni_obj_boxes'] = uni_obj_boxes
                target['uni_obj_labels'] = uni_obj_labels
                target['uni_obj_no'] = uni_obj_no
                target['uni_sub_no'] = uni_sub_no

                target['hoi_labels'] = torch.as_tensor(hoi_labels, dtype=torch.float32)
        else:
            # Eval: keep the raw triplets (subject idx, object idx, verb idx).
            hois = []
            for hoi in img_anno['hoi_annotation']:
                hois.append((hoi['subject_id'],
                             hoi['object_id'],
                             self._valid_verb_ids.index(hoi['category_id'])))
            target['hois'] = torch.as_tensor(hois, dtype=torch.int64)
        return target

    def load_correct_mat(self, path):
        """Load the (verb, object) correction matrix used at evaluation."""
        self.correct_mat = np.load(path)


# Add color jitter to coco transforms
def make_transforms(image_set, args):
    """Build the data-augmentation pipeline for the given split.

    'train' returns either large-scale-jitter or multi-scale-resize
    augmentation (selected by ``args.large_scale_jitter``); 'val' returns
    a fixed 800-pixel resize.  Both end with tensor conversion and
    ImageNet normalization.  Raises ValueError for unknown splits.
    """
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    if image_set == 'train':
        multi_scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
        if args.large_scale_jitter:
            augmentation = [
                T.RandomHorizontalFlip(),
                T.LargeScaleJitter(output_size=1333, aug_scale_min=0.3, aug_scale_max=2.0),
                T.RandomDistortion(0.5, 0.5, 0.5, 0.5),
            ]
        else:
            augmentation = [
                T.RandomHorizontalFlip(),
                T.ColorJitter(.4, .4, .4),
                # Either resize directly, or resize -> crop -> resize.
                T.RandomSelect(
                    T.RandomResize(multi_scales, max_size=1333),
                    T.Compose([
                        T.RandomResize([400, 500, 600]),
                        T.RandomSizeCrop(384, 600),
                        T.RandomResize(multi_scales, max_size=1333),
                    ]),
                ),
            ]
        return T.Compose(augmentation + [normalize])

    if image_set == 'val':
        return T.Compose([T.RandomResize([800], max_size=1333), normalize])

    raise ValueError(f'unknown {image_set}')

