import argparse
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms

from utils import *
from trainer import Trainer
from ImageDataset import UnlabeledImageDataset, UnlabeledImageDatasetContrastive




def get_task_dataset(dataset_name, data_dir):
    """Build the train/val/test datasets for one continual-learning task.

    Args:
        dataset_name: Task identifier (e.g. 'US3D', 'UAVID'); resolved to
            path-list files via ``map_dataset_to_file``.
        data_dir: Root directory the image paths are relative to.

    Returns:
        dict with keys 'train', 'val', 'test'. The train split is a
        two-view contrastive dataset (student view + KD view); val/test
        are single-view datasets with deterministic center-crop transforms.
    """
    # Channel-wise normalization statistics (presumably computed over the
    # task corpora — TODO confirm provenance).
    mean = [0.4056, 0.4292, 0.4032]
    std = [0.2028, 0.1897, 0.1908]

    def _train_augmentation():
        # Stochastic crop + flip pipeline. Built once per view; random
        # parameters are drawn per call, so both views augment independently.
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.RandomResizedCrop(size=(224, 224), scale=(0.8, 1.0), antialias=True),
            transforms.RandomHorizontalFlip(),
            transforms.Normalize(mean, std),
        ])

    # Deterministic evaluation transform shared by the val and test splits.
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.CenterCrop(size=(224, 224)),
        transforms.Normalize(mean, std),
    ])

    list_train_paths = file_to_list(map_dataset_to_file(dataset_name, 'train'))
    list_test_paths = file_to_list(map_dataset_to_file(dataset_name, 'test'))
    list_val_paths = file_to_list(map_dataset_to_file(dataset_name, 'val'))

    return {
        # Two independent augmentations: one view for the student, one for
        # knowledge distillation.
        'train': UnlabeledImageDatasetContrastive(
            list_train_paths, [_train_augmentation(), _train_augmentation()], data_dir),
        'test': UnlabeledImageDataset(list_test_paths, eval_transform, data_dir),
        'val': UnlabeledImageDataset(list_val_paths, eval_transform, data_dir),
    }


def get_memory_dataset(path, data_dir):
    """Build the replay-memory dataset used for data mixup / rehearsal.

    Args:
        path: Text file listing image paths of the memory-buffer samples.
        data_dir: Root directory the image paths are relative to.

    Returns:
        A two-view contrastive dataset over the memory-buffer samples, with
        the same stochastic augmentation pipeline as the task train split.
    """
    # Same normalization statistics as in get_task_dataset.
    mean = [0.4056, 0.4292, 0.4032]
    std = [0.2028, 0.1897, 0.1908]

    def _augmentation():
        # Stochastic crop + flip; parameters drawn per call, so the two
        # views are augmented independently even with identical pipelines.
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.RandomResizedCrop(size=(224, 224), scale=(0.8, 1.0), antialias=True),
            transforms.RandomHorizontalFlip(),
            transforms.Normalize(mean, std),
        ])

    return UnlabeledImageDatasetContrastive(
        file_to_list(path), [_augmentation(), _augmentation()], data_dir)

    

def main():
    """Entry point: parse CLI arguments and run continual SSL pre-training.

    For each trial (independently seeded), a ViT backbone + CoSMAE SSL model
    is built and trained sequentially on the task list. From the second task
    on (or immediately, when pre-trained weights are loaded), training may use
    data mixup from a replay-memory buffer and model-mixup knowledge
    distillation from the previous task's encoder.
    """
    parser = argparse.ArgumentParser(description='Continual Self-Supervised learning')
    parser.add_argument('--experiment_name', default='CoSMAE', type=str)
    parser.add_argument('--data_mixup', default='beta', type=str, help='how to select lambda1 for data mixup', choices=[None, 'beta', 'unif'])
    parser.add_argument('--model_mixup_KD', default='beta', type=str, help='how to select lambda2 for model mixup strategy', choices=[None, 'beta', 'unif'])
    parser.add_argument('--KD_param', default=0.1, type=float, help='parameter (beta) to determine contribution of KD loss to total loss')
    parser.add_argument('--num_trials', default=1, type=int)
    parser.add_argument('--trial_id', default=0, type=int, help='used for naming saved weights')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--num_epochs', default=300, type=int)
    parser.add_argument('--learning_rate', default=1e-3, type=float)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--patch_size', default=16, type=int)
    parser.add_argument('--data_dir', default='/data/lamoe', help='path to the data directory')
    parser.add_argument('--masking_ratio', default=0.75, type=float)
    parser.add_argument('--warmup', default=10, type=int, help='number of epochs used for the linear warmup of learning rate scheduler')
    parser.add_argument('--val_epochs', default=10, type=int, help='calculates validation loss every x epochs')
    parser.add_argument('--start_validation', default=200, type=int, help='starts calculation of validation loss from epoch x on')
    # NOTE(review): type=bool is an argparse pitfall — any non-empty string
    # (even "False") parses truthy. Kept for CLI compatibility; consider
    # migrating to action='store_true'.
    parser.add_argument('--best_val', default=False, type=bool, help='use model with best validation performance')
    parser.add_argument('--load_pretrained_weights', default=None, help='load weights from given path')
    parser.add_argument('--tasks', nargs='+', default=['US3D', 'UAVID', 'Potsdam', 'TreeSatAI'], help='list of CL tasks')
    parser.add_argument('--memory', nargs='+', default=['paths/JAX_memory_1000.txt', 'paths/JAX_UAVID_memory_1000.txt', 'paths/JAX_UAVID_Potsdam_memory_1000.txt'], help='list of paths for memory buffer samples')

    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    torch.set_num_threads(8)
    torch.set_float32_matmul_precision("medium")

    img_size = (3, 224, 224)  # (channels, height, width)

    # One seed per (trial_id + i); indexing fails past 10 trials by design.
    seeds = [42, 43, 44, 45, 46, 47, 48, 49, 50, 51]

    for i in range(args.num_trials):
        trial = i + args.trial_id
        torch.manual_seed(seeds[trial])

        model_dict = {}
        model = get_backbone(img_size[1], img_size[0], patch_size=args.patch_size)
        ssl_model = get_ssl_model(model, 'CoSMAE', args.masking_ratio)
        model_dict['train'] = ssl_model

        # Checksum before any weight loading so a silently failed load can be
        # detected below.
        first_check_sum = get_model_checksum(ssl_model)

        ssl_model.to(device)

        name = args.experiment_name

        if args.load_pretrained_weights is not None:
            if args.best_val:
                # Load the best-validation checkpoint saved by a previous run.
                pretraining_weights_file = args.load_pretrained_weights + str(trial) + '_best.pt'
                pretrained_checkpoint = torch.load('weights/' + pretraining_weights_file, map_location=torch.device(device))
                pretrained_state_dict = pretrained_checkpoint['model_state_dict']
                print('best epoch:', pretrained_checkpoint['epoch'])
                ssl_model.load_state_dict(pretrained_state_dict)
            else:
                pretraining_weights_file = args.load_pretrained_weights + str(trial) + '.pt'
                get_pretrained_weights(ssl_model, pretraining_weights_file, device)
            if args.tasks[0] == 'UAVID':
                # Assuming the loaded weights were pre-trained on US3D.
                name = name + '_' + 'US3D'

        second_check_sum = get_model_checksum(ssl_model)
        if first_check_sum == second_check_sum:
            print('pretrained weights not loaded')

        for task in range(len(args.tasks)):
            # Fresh optimizer (and scheduler, below) for every task.
            ssl_optim = torch.optim.AdamW(ssl_model.parameters(), lr=args.learning_rate)

            ds = get_task_dataset(args.tasks[task], args.data_dir)

            train_dl = DataLoader(
                ds["train"],
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_workers,
                pin_memory=True,
            )
            val_dl = DataLoader(
                ds["val"],
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.num_workers,
                pin_memory=True,
            )
            # BUG FIX: this previously wrapped ds["val"] again, leaving the
            # constructed test split unused.
            test_dl = DataLoader(
                ds["test"],
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.num_workers,
                pin_memory=True,
            )
            dataloaders = {'train': train_dl,
                           'val': val_dl,
                           'test': test_dl,
                           'memory': None}

            # Pick the memory buffer: with pre-trained weights the buffer for
            # the current task index applies immediately; otherwise replay
            # starts from the second task using the previous task's buffer.
            mem_task = None
            if args.load_pretrained_weights and args.data_mixup is not None:
                mem_task = task
            elif args.data_mixup is not None and task > 0:
                mem_task = task - 1

            if mem_task is not None:
                print('init memory loader...')
                mem_ds = get_memory_dataset(args.memory[mem_task], args.data_dir)
                dataloaders['memory'] = DataLoader(
                    mem_ds,
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=args.num_workers,
                    pin_memory=True,
                )

            use_kd = args.model_mixup_KD
            use_mixup = args.data_mixup
            if args.load_pretrained_weights is None and task == 0:
                # First task from scratch: no previous model to distill from
                # and no memory to mix with.
                use_kd = None
                use_mixup = None

            if use_kd:
                # Freeze a copy of the current encoder as the KD teacher.
                old_encoder = get_old_encoder(ssl_model, device, patch_size=args.patch_size)
                model_dict['KD_encoder'] = old_encoder

            if task > 0:
                # `name` still refers to the previous task here, so this loads
                # that task's best-validation checkpoint before continuing.
                if args.best_val:
                    pretrained_checkpoint = torch.load('weights/{}_pretraining_trial_{}_best.pt'.format(name, trial), map_location=torch.device(device))
                    pretrained_state_dict = pretrained_checkpoint['model_state_dict']
                    print('best epoch:', pretrained_checkpoint['epoch'])
                    ssl_model.load_state_dict(pretrained_state_dict)

            name = name + '_' + str(args.tasks[task])

            # Linear warmup for `warmup` epochs, then cosine annealing for the
            # remaining epochs.
            linear_scheduler = torch.optim.lr_scheduler.LinearLR(optimizer=ssl_optim, start_factor=0.1, total_iters=args.warmup)
            cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=ssl_optim, T_max=(args.num_epochs - args.warmup))
            lr_scheduler = torch.optim.lr_scheduler.SequentialLR(optimizer=ssl_optim, schedulers=[linear_scheduler, cosine_scheduler], milestones=[args.warmup])

            logger = SummaryWriter('logs/{}_trial_{}'.format(name, trial))

            ssl_trainer = Trainer(model_dict, device, use_kd, use_mixup, KD_hyperparam=args.KD_param, patch_size=args.patch_size)

            print("\nTrial {} === Task: {} ===".format(trial, args.tasks[task]))
            # BUG FIX: eval/start_eval were hard-coded to 10/200, silently
            # ignoring the --val_epochs / --start_validation CLI flags.
            ssl_trainer.train(dataloaders=dataloaders, optimizer=ssl_optim, scheduler=lr_scheduler, writer=logger, num_epochs=args.num_epochs, best_eval=args.best_val, eval=args.val_epochs, start_eval=args.start_validation, exp_name=name, trial=trial)
            torch.save(ssl_model.state_dict(), 'weights/{}_pretraining_trial_{}.pt'.format(name, trial))
            logger.flush()
            logger.close()

        print("\n=== Training finished ===")

    

# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()