import random
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import cv2
import math


class MultiTaskDataset(Dataset):
    """Image dataset driven by a metadata DataFrame for multi-task training.

    Args:
        meta_df: DataFrame with one row per sample and columns
            ``path`` (image file path), ``task`` (task identifier) and
            ``target`` (label for that task).
        transforms: optional albumentations-style callable invoked as
            ``transforms(image=img)`` and expected to return a dict with
            an ``'image'`` key.  When ``None``, the raw RGB array is
            returned.
    """

    def __init__(self, meta_df, transforms=None, *args, **kwargs):
        self.meta_df = meta_df
        self.transforms = transforms

    def __len__(self):
        # One sample per metadata row.
        return self.meta_df.shape[0]

    def __getitem__(self, idx):
        # Single positional lookup instead of three separate .iloc calls.
        row = self.meta_df.iloc[idx]
        path = row.path
        task = row.task
        target = row.target

        img = cv2.imread(path)
        # cv2.imread returns None (no exception) on a missing or unreadable
        # file; fail loudly here instead of crashing inside cvtColor with a
        # cryptic error.
        if img is None:
            raise FileNotFoundError(f"Failed to read image: {path}")
        # OpenCV loads BGR; downstream code expects RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if self.transforms is not None:
            transformed = self.transforms(image=img)
            img = transformed['image']

        return {
            'image': img,
            'task': task,
            'target': target
        }



def prepare_loaders(df_train, df_valid, CONFIG, data_transforms):
    """Wrap the train/valid metadata frames into DataLoaders.

    Args:
        df_train: metadata DataFrame for the training split.
        df_valid: metadata DataFrame for the validation split.
        CONFIG: dict providing 'train_batch_size', 'valid_batch_size'
            and 'num_workers'.
        data_transforms: dict with 'train' and 'valid' transform callables.

    Returns:
        Tuple ``(train_loader, valid_loader)``.
    """
    datasets = {
        split: MultiTaskDataset(frame, transforms=data_transforms[split])
        for split, frame in (("train", df_train), ("valid", df_valid))
    }

    # Training: shuffled, incomplete final batch dropped for stable batch stats.
    train_loader = DataLoader(
        datasets["train"],
        batch_size=CONFIG['train_batch_size'],
        num_workers=CONFIG['num_workers'],
        shuffle=True,
        pin_memory=True,
        drop_last=True,
    )
    # Validation: deterministic order, every sample evaluated.
    valid_loader = DataLoader(
        datasets["valid"],
        batch_size=CONFIG['valid_batch_size'],
        num_workers=CONFIG['num_workers'],
        shuffle=False,
        pin_memory=True,
    )

    return train_loader, valid_loader


def data_count(count, split, id):
    """Return the share of `count` items assigned to partition `id`.

    `count` items are spread as evenly as possible over `split` partitions;
    the first ``count % split`` partitions each receive one extra item.
    """
    base, extra = divmod(count, split)
    return base + 1 if id < extra else base


def select_data(df, config, balance_type=False):
    """Select row ids from `df`, optionally balancing counts per target value.

    Args:
        df: DataFrame with 'task' and 'target' columns.  The caller's frame
            is NOT modified; a shuffled copy with a sequential 'id' column
            (original row position) is used internally.
        config: dict providing 'tasks' and 'seed', plus 'data_per_task'
            when `balance_type` is True.
        balance_type: when True, each task contributes exactly
            config['data_per_task'] ids, split as evenly as possible across
            that task's target values (ids are repeated — oversampled —
            when a target value has too few rows).

    Returns:
        List of row positions into the caller's `df`.
    """
    tasks = config['tasks']
    # Work on a copy so adding the 'id' column does not mutate the caller's
    # DataFrame.  'id' records the original row position before shuffling.
    df = df.copy()
    df['id'] = range(len(df))
    df = df.sample(frac=1, random_state=config['seed']).reset_index(drop=True)

    data_index = []
    for task in tasks:
        task_df = df[df['task'] == task]
        if balance_type:
            # Group shuffled row ids by target value, preserving first-seen
            # order of the target values.
            type_list = {}
            for idx, val in zip(task_df['id'], task_df['target']):
                type_list.setdefault(val, []).append(idx)
            data_per_task = config['data_per_task']
            for i, data_list in enumerate(type_list.values()):
                # Even share of the per-task budget for this target value;
                # the first (data_per_task % n_types) values get one extra.
                type_count = data_count(data_per_task, len(type_list), i)
                # Tile the id list so slicing can oversample small groups.
                data_list = (data_list * (type_count // len(data_list) + 1))[:type_count]
                data_index.extend(data_list)
        else:
            data_index.extend(list(task_df['id']))

    if balance_type:
        # Internal invariant: balancing must yield exactly the requested
        # budget per task.  Read from config so this also holds (trivially)
        # when `tasks` is empty, instead of raising NameError.
        assert len(data_index) == config['data_per_task'] * len(tasks), \
            "balanced selection produced an unexpected number of ids"
    return data_index

