
import os
import yaml
import time
import torch
import typing
import logging
import datetime
import torchvision

import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import interface.utils as Iutils


# Named module logger (rather than the root logger) so records can be
# filtered and formatted per-module by the application's logging config.
logger = logging.getLogger(__name__)


class CustomImageDataset(Dataset):
    """Dataset serving images from a list of file paths.

    Each sample is loaded lazily from disk, converted to RGB, optionally
    transformed, and returned together with its integer label.
    """

    def __init__(
            self,
            images: typing.List[str],
            labels: typing.List[int],
            transform: torchvision.transforms.Compose = None) -> None:
        """
        Store image paths, labels and the optional transform.

        Args:
                images (List[str]): Paths of the image files.
                labels (List[int]): Label for each image, same order as images.
                transform (transforms.Compose, optional): Transform applied to
                    every loaded image; None leaves images unchanged.
        """
        self.images = images
        self.labels = labels
        self.transform = transform

    def __len__(self) -> int:
        """
        Return the number of samples.

        Returns:
                int: Number of image paths held by the dataset.
        """
        return len(self.images)

    def __getitem__(self, idx: int) -> typing.Tuple[torch.Tensor, int]:
        """
        Load and return the sample at position ``idx``.

        Args:
                idx (int): Position of the requested sample.

        Returns:
                Tuple[torch.Tensor, int]: The (possibly transformed) image
                and its label.
        """
        img = Image.open(self.images[idx]).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        return img, self.labels[idx]


def prepare_data(
        data_dir: str,
        output_classes_path: str,
        image_size: int,
        batch_size: int) -> typing.Tuple[
            typing.Dict[str, DataLoader],
            typing.Dict[str, int], int]:
    """
    Prepare train and validation data loaders.

    Expects ``data_dir`` to contain ``train/`` and ``val/`` sub-directories
    in torchvision ``ImageFolder`` layout (one sub-folder per class).

    Args:
            data_dir (str): Directory containing the dataset.
            output_classes_path (str): Path of the YAML file that receives
                the index -> class-name mapping.
            image_size (int): Side length images are resized to.
            batch_size (int): Batch size for both data loaders.

    Returns:
            Tuple[Dict[str, DataLoader], Dict[str, int], int]: Data loaders
            keyed by phase ('train'/'val'), dataset sizes per phase, and the
            number of classes.
    """
    # ImageNet normalization statistics, shared by both phases.
    normalize = torchvision.transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    data_transforms = {
        'train': torchvision.transforms.Compose([
            torchvision.transforms.Resize((image_size, image_size)),
            torchvision.transforms.RandomRotation(10),
            torchvision.transforms.ToTensor(),
            normalize,
        ]),
        'val': torchvision.transforms.Compose([
            torchvision.transforms.Resize((image_size, image_size)),
            torchvision.transforms.CenterCrop(image_size),
            torchvision.transforms.ToTensor(),
            normalize,
        ]),
    }

    image_datasets = {
        x: torchvision.datasets.ImageFolder(
            os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'val']}
    # Only the training loader is shuffled; validation order does not affect
    # the metrics, and a fixed order makes runs easier to compare.
    dataloaders = {
        x: DataLoader(image_datasets[x], batch_size=batch_size,
                      shuffle=(x == 'train'), num_workers=2)
        for x in ['train', 'val']}
    dataset_sizes = {
        x: len(image_datasets[x]) for x in ['train', 'val']}
    logger.info(f'dataset_sizes: {dataset_sizes}')
    num_classes = len(image_datasets['train'].classes)
    class_dict = dict(enumerate(image_datasets['train'].classes))
    logger.info(f'classes: {class_dict}')

    # Persist the index -> class-name mapping so inference code can map
    # model outputs back to human-readable labels.
    with open(output_classes_path, 'w', encoding='utf-8') as file:
        yaml.dump(class_dict, file, default_flow_style=False,
                  allow_unicode=True)

    return dataloaders, dataset_sizes, num_classes


def train_model(
        model: nn.Module,
        criterion: nn.Module,
        optimizer: optim.Optimizer,
        scheduler: optim.lr_scheduler._LRScheduler,
        dataloaders: typing.Dict[str, DataLoader],
        dataset_sizes: typing.Dict[str, int], device: torch.device,
        num_epochs: int,
        model_save_path: str,
        progress_bar,
        status,
        accuracy_label) -> None:
    """
    Train and evaluate the model, reporting progress to UI widgets.

    Args:
            model (nn.Module): The model to train.
            criterion (nn.Module): The loss function.
            optimizer (optim.Optimizer): The optimizer.
            scheduler (optim.lr_scheduler._LRScheduler): The learning rate
                scheduler, stepped once per epoch after the training phase.
            dataloaders (Dict[str, DataLoader]): Dataloaders keyed by phase
                ('train'/'val').
            dataset_sizes (Dict[str, int]): Dataset sizes keyed by phase.
            device (torch.device): The device to run the model on.
            num_epochs (int): Number of epochs to train for.
            model_save_path (str): Path prefix for checkpoints; a new
                ``{prefix}_epoch_{n}.pth`` file is written whenever the
                validation accuracy improves.
            progress_bar: Widget with a ``value`` attribute (fraction done,
                0..1) — presumably an ipywidgets-style object; verify caller.
            status: Widget with a ``value`` attribute for the ETA text.
            accuracy_label: Widget with a ``value`` attribute for the
                accuracy HTML text.

    Returns:
            None
    """
    since = time.time()
    best_acc = 0.0

    for epoch in range(num_epochs):
        epoch_start_time = datetime.datetime.now()
        logger.info(f'Epoch {epoch}/{num_epochs - 1}')
        logger.info('-' * 10)

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Weight the mean batch loss by the batch size so the epoch
                # average is correct even when the last batch is smaller.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data).item()

            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]

            logger.info(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            if phase == 'val':
                # Russian UI text: "Training quality: <acc>%"; the replace()
                # swaps the decimal point for a comma (Russian locale).
                accuracy_label.value = f"Качество обучения:  <b>{epoch_acc * 100:.2f}%</b>".replace(
                    '.', ',')

            # Checkpoint whenever validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_params_path = f"{model_save_path}_epoch_{epoch}.pth"
                torch.save(model.state_dict(), best_model_params_path)
        epoch_end_time = datetime.datetime.now()
        progress_bar.value = (epoch + 1) / num_epochs
        # ETA = duration of the last epoch times the number of epochs left.
        # total_seconds() is used instead of .seconds: the latter is only the
        # seconds *component* of the timedelta (wraps at 24h) and truncates
        # sub-second epochs to zero.
        remaining_seconds = int(
            (epoch_end_time - epoch_start_time).total_seconds()
            * (num_epochs - (epoch + 1)))
        est_hours, est_minutes, est_seconds = (
            remaining_seconds // 3600,
            (remaining_seconds % 3600) // 60,
            remaining_seconds % 60)
        # Russian UI text: "<h> hours <m> minutes <s> seconds left / Done (<p>%)".
        status.value = f"Осталось {est_hours} часов {est_minutes} минут {est_seconds} секунд / Выполнено  ({progress_bar.value * 100:.0f}%)"
        logger.info('')

    time_elapsed = time.time() - since
    logger.info(
        f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    logger.info(f'Best val Acc: {best_acc:.4f}')
    accuracy_label.value = f"Качество обучения:  <b>{best_acc * 100:.2f}%</b>".replace(
        '.', ',')


def start(progress_bar, status, accuracy_label):
    """Load the training config, build the model and data, and run training.

    Args:
            progress_bar: Widget with a ``value`` attribute updated with the
                fraction of epochs completed.
            status: Widget with a ``value`` attribute updated with ETA text.
            accuracy_label: Widget with a ``value`` attribute updated with
                the validation accuracy text.
    """
    config = Iutils.load_config('train_config.yaml')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataloaders, dataset_sizes, num_classes = prepare_data(
        config['data_dir'],
        config['output_classes_path'],
        config['image_size'],
        config['batch_size'])

    # ConvNeXt-Base pretrained backbone; its final linear layer is replaced
    # to match the number of classes discovered in the dataset.
    model = torchvision.models.convnext_base(pretrained=True)
    in_features = model.classifier[2].in_features
    model.classifier[2] = nn.Linear(in_features, num_classes)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

    train_model(
        model,
        criterion,
        optimizer,
        scheduler,
        dataloaders,
        dataset_sizes,
        device,
        config['num_epochs'],
        config['model_save_path'],
        progress_bar,
        status,
        accuracy_label)


# Example values for train_config.yaml (loaded by start() above);
# data_dir must also be set there.
# output_classes_path: /home/jovyan/notebooks/document-type-detector/models/040724/classes_config.yaml
# image_size: 224
# batch_size: 16
# num_epochs: 200
# model_save_path: /home/jovyan/notebooks/document-type-detector/models/040724/040724
