import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from global_var import *
from dataset import *
from torch.utils.data import DataLoader, Subset, DistributedSampler
import torch.nn as nn
from tqdm import tqdm
import torch.multiprocessing as mp
import utils
import signal
import sys

def get_model():
    """Factory for the network trained by each DDP worker."""
    model = ResidualNet()
    return model

def get_criterion():
    """Factory for the training loss: standard cross-entropy over class logits."""
    loss_fn = nn.CrossEntropyLoss()
    return loss_fn

def get_optimizer(parameters, lr=None, weight_decay=1e-5):
    """Build the Adam optimizer and plateau-based LR scheduler for a worker.

    Args:
        parameters: iterable of model parameters to optimize.
        lr: learning rate; when None (the default) falls back to the
            module-level ``learning_rate`` constant, preserving the original
            call signature for existing callers.
        weight_decay: L2 regularization strength (previously hard-coded).

    Returns:
        Tuple ``(optimizer, scheduler)``.
    """
    if lr is None:
        lr = learning_rate  # module-level constant from global_var
    optimizer = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
    # ReduceLROnPlateau defaults: mode='min', factor=0.1 — steps on a
    # validation metric supplied by the training loop.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    return optimizer, scheduler

def get_dataset(train_ids, valid_ids):
    """Split ``full_dataset`` by index lists into train/validation datasets.

    The training split gets the augmenting ``train_tfm`` plus mixup; the
    validation split only gets the deterministic ``test_tfm``.
    """
    train_split = Subset(full_dataset, train_ids)
    valid_split = Subset(full_dataset, valid_ids)

    return (
        MixupDataset(TransformDataset(train_split, train_tfm)),
        TransformDataset(valid_split, test_tfm),
    )

def calc_accuracy(output, target):
    """Return the fraction of rows whose argmax over the last dim equals
    ``target``, as a 0-dim float tensor."""
    predictions = output.argmax(dim=-1)
    hits = (predictions == target).float()
    return hits.mean()


if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    writer_path = "."
    log_path = "."
    model_path = "."
    try:
        mp.spawn(utils.train_worker, nprocs=world_size, args=(world_size, get_model, get_criterion, get_optimizer, get_dataset, calc_accuracy, n_epochs, batch_size, patience, False, kfold, dataset_size, writer_path, log_path, model_path))
    except Exception as e:
        print(f"Main process exception: {e}")
    except KeyboardInterrupt:
        print(f"KeyboardInterrupt detected. Terminating process group")
    finally:
        for p in mp.active_children():
            print(f"Terminating process {p.pid}")
            p.terminate()
        sys.exit(1)






        
