import os.path

import torch
from torch.utils import data as tdata
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

from PIL import Image
import numpy as np

import database


class FileDataset(tdata.Dataset):
    """Dataset backed by image files on disk.

    Each image is loaded as grayscale, resized to 28x28, min-max
    normalized, and returned as a (1, 28, 28) float32 array. The dataset
    is doubled as a simple augmentation: indices in [N, 2N) return the
    color-inverted version of image ``index % N``.
    """

    def __init__(self, data_files: list, labels):
        """
        Args:
            data_files: paths to image files, parallel to ``labels``.
            labels: target value for each file.

        Raises:
            FileNotFoundError: if any path in ``data_files`` does not exist.
        """
        self.data_files = data_files
        self.labels = labels
        # Fail fast at construction time, naming the offending path.
        for filename in data_files:
            if not os.path.exists(filename):
                raise FileNotFoundError(f"No such file: {filename}.")

    def __len__(self):
        # Doubled: one plain and one inverted copy per source image.
        return 2 * len(self.labels)

    def __getitem__(self, img_id):
        # Indices [0, N) map to plain images, [N, 2N) to inverted ones.
        idx = img_id % len(self.labels)
        filename = self.data_files[idx]
        label = self.labels[idx]
        img = Image.open(filename).convert('L')  # grayscale
        img = img.resize((28, 28), Image.LANCZOS)
        img = np.array(img).reshape((1, 28, 28)).astype('float32')
        # Min-max normalize; clamp the denominator so a constant image no
        # longer produces inf/NaN. The small offset keeps every pixel
        # strictly positive (presumably for downstream numeric safety).
        lo, hi = np.min(img), np.max(img)
        img = (img - lo) / max(hi - lo, 1e-5) + 1e-5
        if img_id >= len(self.labels):
            img = 1 - img  # inverted copy (augmentation)
        return img, label


def get_data_from_db(dataset_name):
    """Build a :class:`FileDataset` from the file paths and labels that
    the database stores under ``dataset_name``."""
    file_paths, targets = database.find_data_by_dataset_name(dataset_name)
    return FileDataset(file_paths, targets)


def split_int(n, ratios):
    """Split the integer ``n`` into ``len(ratios)`` integer parts.

    Every part except the last is ``int(n * ratio)``; the final part
    absorbs whatever remains, so the parts always sum to exactly ``n``.
    """
    parts = [int(n * ratio) for ratio in ratios[:-1]]
    parts.append(n - sum(parts))
    return parts


def split_dataset(dataset, train_ratio, valid_ratio):
    """Randomly partition ``dataset`` into train/valid/test subsets.

    The test share is whatever fraction remains after the train and
    validation ratios; sizes are computed with :func:`split_int` so the
    three subsets cover the whole dataset.
    """
    leftover = 1 - valid_ratio - train_ratio
    sizes = split_int(len(dataset), [train_ratio, valid_ratio, leftover])
    return tdata.random_split(dataset, sizes)


def get_dataset(dataset_id, train_ratio, valid_ratio):
    """Load a dataset and split it into train/valid/test subsets.

    ``dataset_id == 1`` selects the bundled MNIST training set
    (downloaded on demand); any other id is looked up in the project
    database.
    """
    if dataset_id == 1:
        source = datasets.MNIST(
            root="mnist-data",
            train=True,
            download=True,
            transform=ToTensor(),
        )
    else:
        source = get_data_from_db(dataset_id)
    train_part, valid_part, test_part = split_dataset(source, train_ratio, valid_ratio)
    return train_part, valid_part, test_part


def get_dataloader(dataset, batch_size=32):
    """Wrap a (train, valid, test) dataset triple in DataLoaders.

    Only the training loader shuffles; validation and test loaders keep
    their original order for reproducible evaluation.
    """
    train_split, valid_split, test_split = dataset
    train_loader = DataLoader(train_split, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(valid_split, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_split, batch_size=batch_size, shuffle=False)
    return train_loader, valid_loader, test_loader
