import os
import sys
import cv2
import numpy as np

sys.path.extend(['..'])

import torch
import torch.utils.data
import torch.utils.data as data_utils
import torchvision.transforms as transforms
import pickle as pkl

from utils.data_utils import *
import os
from glob import glob
# Dataset (Input Pipeline)
class CustomDataset(data_utils.Dataset):
    """Text-recognition dataset over (image path, label) pairs listed in TSV files.

    Arguments:
        config: object providing
            - data_file: list of paths to tab-separated ``image_path<TAB>label`` files
            - dict_file: path to a character dictionary, one character per line
        is_training: stored for callers; it does not alter behavior in this class.

    Returns (per item): dict with
        - 'img':   float tensor, fixed 32x128 spatial size, normalized to [-1, 1]
        - 'label': 1-D int64 tensor of per-character indices (1-based; index 0 is
          left unused, presumably reserved for a CTC blank -- TODO confirm)
    """

    def __init__(self, config, is_training=True):
        self.config = config
        self.is_training = is_training
        # Validate eagerly with a real exception; `assert` disappears under -O.
        if not isinstance(config.data_file, list):
            raise TypeError('config.data_file must be a list of file paths')

        # Gather (image path, transcription) pairs from every listing file.
        self.im_files = []
        self.labels = []
        for data_file in config.data_file:
            with open(data_file, 'r') as f:
                for line in f.read().strip().split('\n'):
                    im_file, label = line.strip().split('\t')
                    self.im_files.append(im_file)
                    self.labels.append(label)

        # char -> index map; indices start at 1 so 0 stays free
        # (presumably for the CTC blank -- verify against the model/loss).
        with open(config.dict_file, 'r') as f:
            chars = f.read().strip().split('\n')
        self.char_map = {ch: i + 1 for i, ch in enumerate(chars)}

        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ])

    def __len__(self):
        """Number of samples across all listing files."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Load, resize and normalize one image; encode its label to indices."""
        im_file = self.im_files[idx]
        im = cv2.imread(im_file)
        if im is None:
            # cv2.imread silently returns None on a missing/corrupt file;
            # fail loudly here instead of with an opaque cv2 error later.
            raise FileNotFoundError('Could not read image: %s' % im_file)

        # Fixed-size resize; aspect ratio is intentionally not preserved.
        img = cv2.resize(im, (128, 32))
        img = self.transforms(img / 255.)

        # A KeyError here means the label contains a character absent
        # from dict_file -- surfacing that is intentional.
        lab = [self.char_map[cc] for cc in self.labels[idx]]

        item = {}
        item['img'] = img.float()
        item['label'] = torch.tensor(lab, dtype=torch.long)

        return item


class DataLoader:
    """Factory building the training ``torch.utils.data.DataLoader`` over CustomDataset."""

    def __init__(self, config):
        # config must provide batch_size plus everything CustomDataset reads.
        self.config = config

    def create_train_loader(self):
        """Create a shuffled training loader using ``batch_collate`` for batching."""
        self.dataset = CustomDataset(config=self.config)
        return torch.utils.data.DataLoader(
            self.dataset, batch_size=self.config.batch_size, shuffle=True,
            num_workers=2, pin_memory=True, collate_fn=self.batch_collate)

    def batch_collate(self, batch):
        """Collate a list of {'img', 'label'} sample dicts into one batch dict.

        Returns:
            dict with
            - 'img':       stacked image tensor (B, C, H, W); all samples share
                           one fixed spatial size, so a plain stack suffices
            - 'label_len': int tensor of original (unpadded) label lengths,
                           needed e.g. by CTC-style losses
            - 'label':     labels right-padded with 0 to the batch maximum
        """
        # Explicit import: the module-level name previously came in via a
        # wildcard import, which hid this dependency.
        from torch.nn.utils.rnn import pad_sequence

        items = {}
        items['img'] = torch.stack([item['img'] for item in batch], dim=0)
        items['label_len'] = torch.tensor([len(item['label']) for item in batch])
        # padding_value=0 matches the index left unused by char_map.
        items['label'] = pad_sequence([item['label'] for item in batch],
                                      batch_first=True, padding_value=0)

        return items
