from pathlib import Path

#from .my_basedataloader import MyBaseDataloader as BaseDataloader
from .basedataloader import BaseDataloader
from .basedataloader import  make_transforms
import torch
from .vcoco_text_label import *

def build(image_set, args):
    """Build the V-COCO HOI dataset for the given split.

    Args:
        image_set: Split name, either ``'train'`` or ``'val'``.
        args: Namespace providing ``hoi_path``, ``num_queries``,
            ``dataset_file`` and ``clip_model``.

    Returns:
        A ``VCOCO`` dataset with its correlation matrix loaded.

    Raises:
        AssertionError: if ``args.hoi_path`` does not exist.
        KeyError: if ``image_set`` is not ``'train'`` or ``'val'``.
    """
    root = Path(args.hoi_path)
    assert root.exists(), f'provided HOI path {root} does not exist'
    # All annotation files live under the same CDN-annotations directory.
    anno_root = root / 'annotations' / 'cdn_annotations'
    PATHS = {
        'train': (root / 'images' / 'train2014',
                  anno_root / 'trainval_vcoco.json'),
        'val': (root / 'images' / 'val2014',
                anno_root / 'test_vcoco.json'),
    }

    img_folder, anno_file = PATHS[image_set]
    dataset = VCOCO(image_set, img_folder, anno_file,
                    transforms=make_transforms(image_set, args),
                    num_queries=args.num_queries,
                    dataset_file=args.dataset_file,
                    clip_model=args.clip_model)
    # The verb/object correlation matrix is loaded for every split,
    # not only for 'val' (the old split-only guard was removed).
    correct_mat_path = anno_root / 'corre_vcoco.npy'
    dataset.load_correct_mat(correct_mat_path)
    return dataset



class VCOCO(BaseDataloader):
    """V-COCO human-object interaction dataset.

    Thin wrapper around ``BaseDataloader`` that fixes the verb
    vocabulary to the 29 V-COCO action categories and records which
    HOI text labels are valid for this dataset.
    """

    def __init__(self, img_set, img_folder, anno_file, transforms,
                 num_queries, dataset_file, clip_model='RN50'):
        # Python 3 zero-argument super() replaces the legacy
        # super(VCOCO, self) form.
        super().__init__(img_set, img_folder, anno_file, transforms,
                         num_queries, dataset_file, clip_model)
        # V-COCO defines 29 verb (action) categories.
        self._valid_verb_ids = range(29)
        # HOI pairs that have an associated text prompt;
        # vcoco_hoi_text_label comes from the vcoco_text_label star import.
        self.text_label_ids = list(vcoco_hoi_text_label.keys())

    def __len__(self):
        """Return the number of annotation entries in this split."""
        return len(self.annotations)

    def load_img(self, idx):
        """Return the annotation entry at index ``idx``.

        NOTE(review): despite the name, this returns the raw annotation
        record, not decoded image data — confirm against how
        BaseDataloader consumes it.
        """
        return self.annotations[idx]

