from examples.handle_train_data import DataHandler
import os
from dataset.vocab import WordVocab
import torch
from torch.utils.data import Dataset
from backbones.swin_transformer import SwinTransformer
import os
# Project root: two directory levels above the current working directory.
# Uses os.path.dirname instead of manual "/"-splitting so path handling is
# robust and portable. NOTE(review): this assumes the script is launched from
# <root>/<pkg>/<subdir> — confirm against how the project is invoked.
data_root = os.path.dirname(os.path.dirname(os.getcwd())) + "/"
class Lmdataset(Dataset):
    """Minimal in-memory Dataset wrapping a pre-built list of samples.

    All samples are materialized up front by the caller; this class only
    provides the indexing protocol that ``DataLoader`` requires.
    """

    def __init__(self, all_data):
        super().__init__()
        # Keep a reference to the already-processed sample list.
        self.all_data = all_data

    def __getitem__(self, idx):
        """Return the pre-computed sample at position *idx*."""
        sample = self.all_data[idx]
        return sample

    def __len__(self):
        """Number of samples held in memory."""
        return len(self.all_data)

def dataloder(config):
    """Build the training DataLoader from paired JSON/PNG files.

    Scans ``data_root/config.DATA.DATA_PATH`` for ``*.json`` annotation files,
    pairs each with its same-stem ``*.png`` image, materializes every sample
    via ``DataHandler``, and wraps the result in a batch-size-1 DataLoader.

    Args:
        config: project config node; reads ``DATA.DATA_PATH`` and
            ``DATA.VOCAB_PATH``.

    Returns:
        torch.utils.data.DataLoader over the pre-built samples (batch_size=1).
    """
    data_dir = os.path.join(data_root, config.DATA.DATA_PATH)
    # The vocab is identical for every sample — load it once instead of
    # re-reading it from disk on every loop iteration (was loop-invariant).
    vocab = WordVocab.load_vocab(os.path.join(data_root, config.DATA.VOCAB_PATH))
    all_data = []
    # endswith(".json") is stricter than the old `file[-4:] == "json"`,
    # which would also match names merely ending in "json" without a dot.
    for file in sorted(os.listdir(data_dir)):
        if not file.endswith(".json"):
            continue
        file_path = os.path.join(data_dir, file)
        # Same stem, .png extension: "x.json" -> "x.png".
        image_path = os.path.join(data_dir, file[:-4] + "png")
        data = DataHandler(file_path, image_path, vocab, seq_len=100,
                           encoding="utf-8", on_memory=True)
        # NOTE(review): DataHandler.__getitem__ is called with no index —
        # presumably it returns the whole sample dict; confirm in DataHandler.
        all_data.append(data.__getitem__())
    lmdataset = Lmdataset(all_data)
    return torch.utils.data.DataLoader(lmdataset, batch_size=1)

def val_dataloder(config):
    """Build the validation DataLoader from paired JSON/PNG files.

    Scans ``data_root/config.DATA.VAL_DATA_PATH`` for ``*.json`` annotation
    files, pairs each with its same-stem ``*.png`` image, materializes every
    sample via ``DataHandler``, and wraps the result in a batch-size-1
    DataLoader.

    Args:
        config: project config node; reads ``DATA.VAL_DATA_PATH`` and
            ``DATA.VOCAB_PATH``.

    Returns:
        torch.utils.data.DataLoader over the pre-built samples (batch_size=1).
    """
    data_dir = os.path.join(data_root, config.DATA.VAL_DATA_PATH)
    # The vocab is identical for every sample — load it once instead of
    # re-reading it from disk on every loop iteration (was loop-invariant).
    vocab = WordVocab.load_vocab(os.path.join(data_root, config.DATA.VOCAB_PATH))
    all_data = []
    # endswith(".json") is stricter than the old `file[-4:] == "json"`,
    # which would also match names merely ending in "json" without a dot.
    for file in sorted(os.listdir(data_dir)):
        if not file.endswith(".json"):
            continue
        file_path = os.path.join(data_dir, file)
        # Same stem, .png extension: "x.json" -> "x.png".
        image_path = os.path.join(data_dir, file[:-4] + "png")
        data = DataHandler(file_path, image_path, vocab, seq_len=100,
                           encoding="utf-8", on_memory=True)
        # NOTE(review): DataHandler.__getitem__ is called with no index —
        # presumably it returns the whole sample dict; confirm in DataHandler.
        all_data.append(data.__getitem__())
    lmdataset = Lmdataset(all_data)
    return torch.utils.data.DataLoader(lmdataset, batch_size=1)

if __name__ == "__main__":
    # Smoke test: build the training loader and dump the covered-label
    # field of every batch so the data pipeline can be eyeballed.
    from configs.config import _C as config

    loader = dataloder(config)
    for step, batch in enumerate(loader):
        print(step, batch["covered_label"])