import argparse
import logging
import sys
from pathlib import Path
from sklearn.metrics import classification_report
from warmup_scheduler import GradualWarmupScheduler
import pytorch_ssim
import pytorch_msssim
from lpips_pytorch import LPIPS
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch import optim
from torch.utils.data import DataLoader, random_split,ConcatDataset
from tqdm import tqdm

from utils.data_loading import BasicDataset, CarvanaDataset
from utils.dice_score import dice_loss,CrossEntropyLoss_label_smooth
from evaluate import evaluate, evaluate_classification,evaluate_all_folders
from unet import UNet ,ResUNet,SeResUNet,hinet
from utils.transforms  import ToTensor,Compose,RandomRotate,Center_Crop,RandomAffine
from utils.utils import seed_everything,kfolder,split_dataset
from skimage.metrics import structural_similarity as criterion_ssim
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import mean_squared_error as mse

import time


# Directory where the final cascade checkpoint is written (created on demand
# by train_net when save_checkpoint is enabled).
dir_checkpoint = Path('./checkpoints/')

# Fix all RNG seeds (python / numpy / torch) up front for reproducible runs.
seed_everything(2022)

def criterion_psnr(img1, img2):
    """Peak signal-to-noise ratio between two image tensors.

    Both tensors are detached, moved to CPU and converted to float32 numpy
    arrays before calling the skimage PSNR implementation.

    Args:
        img1: reference image tensor.
        img2: test image tensor (same shape as img1).

    Returns:
        float: PSNR in dB as computed by skimage.
    """
    # BUGFIX: .numpy() raises RuntimeError on tensors that require grad
    # (e.g. network outputs); detach before the conversion.
    return psnr(img1.detach().cpu().numpy().astype(np.float32),
                img2.detach().cpu().numpy().astype(np.float32))

def train_net(
              train_set, val_set,
              device,
              experiment,
              model: str = 'unet',
              epochs: int = 5,
              batch_size: int = 1,
              learning_rate: float = 1e-5,
              val_percent: float = 0.1,
              save_checkpoint: bool = True,
              img_scale: float = 0.5,
              amp: bool = False,
              mse: bool = False,
              mae: bool = False,
              ssim: bool = False,
              lpips: bool = False,
              mssim: bool = False,
              model_count: int = 12,
              acceleration: int = 4):
    """Train a cascade of reconstruction networks on (t1, masked t2) -> t2.

    The network is an nn.ModuleList of `model_count` identical blocks applied
    sequentially: each block refines the previous block's t2 estimate,
    conditioned on the t1 image. Enabled loss terms are combined with
    learnable homoscedastic-uncertainty weights (one log-sigma per term).

    Args:
        train_set, val_set: datasets yielding dicts with 't1', 't2_mask', 't2'.
        device: torch device to train on.
        experiment: wandb run used for metric/image logging.
        model: one of 'unet' | 'resunet' | 'seresunet' | 'hinet'.
        epochs, batch_size, learning_rate: optimisation settings.
        val_percent, img_scale: kept for interface compatibility (logged only).
        save_checkpoint: save the final state_dict under ./checkpoints/.
        amp: enable CUDA mixed-precision training.
        mse, mae, ssim, lpips, mssim: toggles for the individual loss terms.
        model_count: number of cascaded blocks.
        acceleration: undersampling factor (forwarded to evaluate / naming).

    Raises:
        ValueError: if `model` is not a recognised architecture name.
    """
    # 1. Build the cascade. n_channels=1 (single-channel MRI); n_classes and
    # bilinear come from the module-level CLI args.
    if model == 'unet':
        net = nn.ModuleList([UNet(n_channels=1, n_classes=args.classes, bilinear=args.bilinear, device=device) for _ in range(model_count)])
    elif model == 'resunet':
        net = nn.ModuleList([ResUNet(n_channels=1, n_classes=args.classes, bilinear=args.bilinear, device=device) for _ in range(model_count)])
    elif model == 'seresunet':
        net = nn.ModuleList([SeResUNet(n_channels=1, n_classes=args.classes, bilinear=args.bilinear, device=device) for _ in range(model_count)])
    elif model == 'hinet':
        net = nn.ModuleList([hinet.Multi_modal_generator(1, 1, 32).to(device=device) for _ in range(model_count)])
    else:
        # BUGFIX: fail fast instead of hitting UnboundLocalError on `net` below.
        raise ValueError(f'Unknown model: {model}')

    if args.load:
        net.load_state_dict(torch.load(args.load, map_location=device))
        logging.info(f'Model loaded from {args.load}')

    net.to(device=device)

    # Validation must not use random augmentation — deterministic transform only.
    val_set.transforms = Compose([
            ToTensor(),
        ])

    # 2. Create data loaders.
    loader_args = dict(batch_size=batch_size, num_workers=12, pin_memory=True)
    train_loader = DataLoader(train_set, shuffle=True, **loader_args)
    val_loader = DataLoader(val_set, shuffle=True, drop_last=False, **loader_args)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {learning_rate}
        Training size:   {len(train_set)}
        Validation size: {len(val_set)}
        Checkpoints:     {save_checkpoint}
        Device:          {device.type}
        Images scaling:  {img_scale}
        Mixed Precision: {amp}
    ''')

    # 3. Multi-task uncertainty weights (Kendall-style): each enabled loss is
    # scaled by 1/exp(sigma)^2 plus a log regulariser.
    # BUGFIX (two defects in the original):
    #   - the dict literal wrote the 'ssim' key twice and never defined
    #     'mssim', so enabling --mssim raised a KeyError during training;
    #   - the sigmas were created *after* the optimizer was built from
    #     net.parameters() only, so they were never actually trained.
    sigma_dict = {key: nn.Parameter(torch.tensor(0.0, device=device, dtype=torch.float32))
                  for key in ('re', 'mae', 'ssim', 'mssim', 'lpips', 'mse')}

    optimizer = torch.optim.Adam(list(net.parameters()) + list(sigma_dict.values()), lr=learning_rate)
    warmup_ratio = 100
    warmup_epoch = int(epochs * 0.2)
    # Cosine annealing over the post-warmup epochs, wrapped in gradual warmup.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs - warmup_epoch, eta_min=0, last_epoch=-1)
    scheduler = GradualWarmupScheduler(optimizer, multiplier=warmup_ratio, total_epoch=warmup_epoch, after_scheduler=scheduler)

    grad_scaler = torch.cuda.amp.GradScaler(enabled=amp)
    # NOTE: this local deliberately shadows the skimage `criterion_ssim`
    # imported at module level; evaluate() below receives the pytorch one.
    criterion_ssim = pytorch_ssim.SSIM().to(device)
    criterion_mssim = pytorch_msssim.MSSSIM().to(device)
    criterion_mae = nn.L1Loss().to(device)
    criterion_mse = nn.MSELoss().to(device)
    criterion_lpips = LPIPS(net_type='vgg').to(device)  # choose a network type from ['alex', 'squeeze', 'vgg']

    for epoch in range(1, epochs + 1):
        net.train()
        with tqdm(total=len(train_set), desc=f'Epoch {epoch}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                t1, t2_masked, t2 = batch['t1'], batch['t2_mask'], batch['t2']

                t1 = t1.to(device=device, dtype=torch.float32)
                t2_masked = t2_masked.to(device=device, dtype=torch.float32)
                t2_true = t2.to(device=device, dtype=torch.float32)

                with torch.cuda.amp.autocast(enabled=amp):
                    loss = 0
                    loss_dict = {}

                    # Run the cascade: each block refines the previous estimate.
                    for block in net:
                        t2_pred = block(t2_masked, t1)
                        t2_masked = t2_pred

                    # Loss terms are computed on the final cascade output only.
                    if mae:
                        # BUGFIX: the original used `1 - L1`, which *maximises*
                        # the absolute error; L1 itself is what must be minimised
                        # (the `1 - x` form is only correct for similarities).
                        loss_dict['mae'] = criterion_mae(t2_pred, t2_true)
                    if ssim:
                        loss_dict['ssim'] = 1 - criterion_ssim(t2_pred, t2_true)
                    if mssim:
                        loss_dict['mssim'] = 1 - criterion_mssim(t2_pred, t2_true)
                    if lpips:
                        loss_dict['lpips'] = criterion_lpips(t2_pred, t2_true)
                    if mse:
                        loss_dict['mse'] = criterion_mse(t2_pred, t2_true)

                    # Uncertainty weighting: loss / sigma^2 + log(sigma + 1).
                    for key, loss_single in loss_dict.items():
                        sigma_single = torch.exp(sigma_dict[key])
                        loss += loss_single / sigma_single / sigma_single + torch.log(sigma_single + 1.0)

                optimizer.zero_grad(set_to_none=True)
                grad_scaler.scale(loss).backward()
                grad_scaler.step(optimizer)
                grad_scaler.update()

                pbar.update(t1.shape[0])
                pbar.set_postfix(**{'loss (batch)': loss.item()})

        # Evaluation after each epoch (SSIM and PSNR over the validation set).
        val_ssim, val_t1, val_t2_mask, val_t2_true, val_t2_pred = evaluate(net, val_loader, device, criterion_ssim, acceleration)
        val_psnr, _, _, _, _ = evaluate(net, val_loader, device, criterion_psnr, acceleration)
        scheduler.step()

        # BUGFIX: the original logged `loss_ssim` / `loss_mse` unconditionally,
        # which raised NameError unless both --ssim and --mse were enabled.
        # Log exactly the terms computed on the last training batch instead.
        train_log = {f'train loss {key}': value for key, value in loss_dict.items()}
        train_log['train loss total'] = loss
        experiment.log(train_log, step=epoch)

        logging.info(f'Validation ssim: {val_ssim},Validation psnr:{val_psnr}')
        # BUGFIX: the original printed a 1-tuple; print the string directly.
        print(f'epoch {epoch}:Validation ssim: {val_ssim},Validation psnr:{val_psnr}')
        experiment.log({
            'learning rate': optimizer.param_groups[0]['lr'],
            'metrics/validation SSIM': val_ssim,
            'metrics/validation PSNR': val_psnr,
        }, step=epoch)
        experiment.log({
            'validation/t1': wandb.Image(val_t1.cpu()),
            'validation/t2_mask': wandb.Image(val_t2_mask.cpu()),
            'validation/t2_pred': wandb.Image(val_t2_pred.cpu()),
            'validation/t2_true': wandb.Image(val_t2_true.cpu()),
        }, step=epoch)

    if save_checkpoint:
        Path(dir_checkpoint).mkdir(parents=True, exist_ok=True)
        torch.save(net.state_dict(), str(dir_checkpoint / f'{model}_{acceleration}_{model_count}.pth'))

def get_args(argv=None):
    """Parse command-line options for training.

    Args:
        argv: optional explicit argument list (defaults to sys.argv[1:]);
            accepting it keeps the parser unit-testable. Existing callers
            that pass nothing are unaffected.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks')
    parser.add_argument('--model', '-m', type=str, default='unet', help='model name ')
    parser.add_argument('--epochs', '-e', metavar='E', type=int, default=50, help='Number of epochs')
    parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=1, help='Batch size')
    parser.add_argument('--learning-rate', '-l', metavar='LR', type=float, default=1e-4,
                        help='Learning rate', dest='lr')
    parser.add_argument('--load', '-f', type=str, default=False, help='Load model from a .pth file')
    parser.add_argument('--scale', '-s', type=float, default=1.0, help='Downscaling factor of the images')
    parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0,
                        help='Percent of the data that is used as validation (0-100)')
    parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision')
    parser.add_argument('--bilinear', action='store_true', default=False, help='Use bilinear upsampling')
    parser.add_argument('--classes', '-c', type=int, default=1, help='Number of classes')
    parser.add_argument('--ssim', action='store_true', default=False, help='ssim loss')
    parser.add_argument('--mssim', action='store_true', default=False, help='mssim loss')
    parser.add_argument('--lpips', action='store_true', default=False, help='lpips loss')
    parser.add_argument('--mse', action='store_true', default=False, help='mse loss')
    parser.add_argument('--mae', action='store_true', default=False, help='mae loss')
    # BUGFIX: the two options below had copy-pasted 'mae loss' help strings.
    parser.add_argument('--model_count', type=int, default=12, help='number of cascaded network blocks')
    parser.add_argument('--acceleration', type=int, default=4, help='k-space undersampling acceleration factor')
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = get_args()

    # Data directories: t1/t2 pairs are stored per acceleration factor.
    dir_t1 = Path(f'./data_{args.acceleration}/t1/')
    dir_t2_mask = Path(f'./data_{args.acceleration}/t2_mask/')
    dir_t2 = Path(f'./data_{args.acceleration}/t2/')
    # NOTE(review): fid directory is NOT acceleration-specific — confirm intended.
    dir_fid = Path(f'./data/t2_fid')

    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    # (Initialize wandb logging; run name encodes model / acceleration / cascade depth.)
    experiment = wandb.init(project='klf_project', resume='allow', anonymous='must',name=f'{args.model}_accelerate{args.acceleration}_cascade{args.model_count}_{time.strftime("%Y-%m-%d %H:%M:%S")}')
    experiment.config.update(dict(epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.lr,))

    print(f'{args.model}_accelerate{args.acceleration}_model_count{args.model_count}')

    # Split image paths into train/test; both datasets read the same folders
    # and are restricted afterwards via their `ids` lists.
    train_image_path_list, test_image_path_list = split_dataset(image_dir=dir_t1)

    train_set = BasicDataset(dir_t1, dir_t2_mask, dir_t2, dir_fid, args.scale)
    val_set = BasicDataset(dir_t1, dir_t2_mask, dir_t2, dir_fid, args.scale)
    train_set.ids = train_image_path_list
    val_set.ids = test_image_path_list
    logging.info(f'Creating train dataset with {len(train_set)} examples,test dataset with {len(val_set)} examples.')

    # Train.
    train_net(
        train_set=train_set, val_set=val_set,
        experiment=experiment,
        model=args.model,
        epochs=args.epochs,
        batch_size=args.batch_size,
        learning_rate=args.lr,
        device=device,
        img_scale=args.scale,
        val_percent=args.val / 100,
        amp=args.amp,
        mse=args.mse,
        # BUGFIX: --mae was parsed but never forwarded, so the flag was a no-op.
        mae=args.mae,
        mssim=args.mssim,
        ssim=args.ssim,
        lpips=args.lpips,
        model_count=args.model_count,
        acceleration=args.acceleration,
    )