import argparse
import logging
from pathlib import Path
import pytorch_ssim
import numpy as np
import copy

import torch
import torch.nn as nn


from torch.utils.data import DataLoader, random_split,ConcatDataset
from tqdm import tqdm

from utils.data_loading import BasicDataset, CarvanaDataset
from unet import UNet ,ResUNet,SeResUNet,hinet
from utils.transforms  import ToTensor,Compose,RandomRotate,Center_Crop,RandomAffine
from utils.utils import seed_everything,kfolder,split_dataset
from evaluate import data_consistency

import numpy as np
import matplotlib.pyplot as plt
import os
import io
from PIL import Image

from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg
seed_everything(2022)  # fix all RNG seeds so evaluation is reproducible


def get_args(argv=None):
    """Parse command-line options for the evaluation script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None`` so
            ``argparse`` falls back to ``sys.argv[1:]`` — existing callers
            are unaffected; passing a list makes the parser testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks')
    parser.add_argument('--model', '-m', type=str, default='unet', help='model name ')
    parser.add_argument('--load', '-f', type=str, default='checkpoints', help='Load model from a .pth file')
    parser.add_argument('--scale', '-s', type=float, default=1.0, help='Downscaling factor of the images')
    parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0,
                        help='Percent of the data that is used as validation (0-100)')
    parser.add_argument('--bilinear', action='store_true', default=False, help='Use bilinear upsampling')
    parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=1, help='Batch size')
    # Help texts below previously said 'mae loss' — a copy-paste error.
    parser.add_argument('--acceleration', type=int, default=4, help='undersampling acceleration factor')
    parser.add_argument('--model_counts', type=int, default=1, help='number of cascaded model blocks')

    return parser.parse_args(argv)

def test(val_set,
              device,
              model:str ='unet', 
              batch_size: int = 1,
              acceleration = 4):
    """Run inference over ``val_set``, print per-batch SSIM, and dump all
    predictions to ``pred_npy/{model}_{acceleration}_{model_counts}.npy``.

    NOTE(review): this also reads the module-level ``args`` namespace
    (bilinear, model_counts, load, model, acceleration), so it can only be
    called after ``get_args()`` has populated ``args`` in ``__main__``.

    Args:
        val_set: dataset whose batches are dicts with keys 't1', 't2_mask',
            't2' and 'fid'.
        device: torch device to run inference on.
        model: one of 'unet', 'resunet', 'seresunet', 'hinet'.
        batch_size: DataLoader batch size.
        acceleration: undersampling factor; currently unused here — the
            data-consistency step that consumed it is commented out below.
    """
    # SSIM is used purely as a reported metric; no gradients in eval.
    criterion = pytorch_ssim.SSIM().to(device)
    # UNet-family models are built as a cascade of `args.model_counts`
    # identical blocks; hinet is a single monolithic generator.
    if model =='unet':
        net = nn.ModuleList([UNet(n_channels=1, n_classes=1, bilinear=args.bilinear,device=device) for _ in range(args.model_counts)])
    elif model =='resunet':
        net = nn.ModuleList([ResUNet(n_channels=1, n_classes=1, bilinear=args.bilinear,device=device) for _ in range(args.model_counts)])
    elif model =='seresunet':
        net = nn.ModuleList([SeResUNet(n_channels=1, n_classes=1, bilinear=args.bilinear,device=device) for _ in range(args.model_counts)])
    elif model =='hinet':
        # NOTE(review): hinet is a plain nn.Module, not a ModuleList — the
        # `for block in net:` loop below would not iterate it; confirm this
        # branch is actually exercised.
        net= hinet.Multi_modal_generator(1,1,32).to(device=device)
    if args.load:
        # Checkpoint filename encodes model name, acceleration and cascade depth.
        logging.info(f'Model loaded from {args.load}/{args.model}_{args.acceleration}_{args.model_counts}.pth')
        net.load_state_dict(torch.load(f'{args.load}/{args.model}_{args.acceleration}_{args.model_counts}.pth', map_location=device))

    net.to(device=device)

    # Evaluation uses tensor conversion only — no train-time augmentation.
    val_set.transforms = Compose([
            ToTensor(),
        ])

    # 3. Create data loaders
    loader_args = dict(batch_size=batch_size, num_workers=12, pin_memory=True)
    val_loader = DataLoader(val_set, shuffle=False, drop_last=False, **loader_args)

    net.eval()
    img_total = 0
    # iterate over the validation set

    # Pre-allocated buffer for every predicted slice.
    # NOTE(review): hard-coded to 432 = 3*8*18 slices of 256x256 — must match
    # the test-split size; the reshape below assumes [3, 8, 18, 256, 256].
    all_data_npy = np.ones([432,256,256])

    for batch in tqdm(val_loader):
        t1,t2_mask,t2 = batch['t1'],batch['t2_mask'],batch['t2']
        fid = batch['fid']
        t1 = t1.to(device=device, dtype=torch.float32)
        t2_mask = t2_mask.to(device=device, dtype=torch.float32)
        t2_true = t2.to(device=device, dtype=torch.float32)

        with torch.no_grad():
            # predict the mask
            # Cascade: each block refines the previous block's output,
            # conditioned on the t1 image (t2_mask is rebound each step).
            for block in net:   
                t2_pred = block(t2_mask,t1)
                t2_mask = t2_pred

            # t2_pred = torch.squeeze(t2_pred)

            # t2_pred = data_consistency (fid.cpu().numpy(),  t2_pred.cpu().numpy(),acceleration)              
            # t2_pred = torch.tensor(t2_pred,dtype=torch.float32,device=device)

            # t2_pred = torch.unsqueeze(t2_pred,1)
            
            # Copy each predicted image of the batch into the flat buffer.
            for img_index in range(t2_pred.shape[0]):
                npy_data = t2_pred[img_index].cpu().numpy()
                all_data_npy[img_total+img_index] = copy.deepcopy(npy_data)

            img_total+=t2_pred.shape[0]
        # Per-batch SSIM against the fully-sampled ground truth.
        loss = criterion(t2_pred, t2_true)
        print (loss)
    # Reshape flat slices to [sample, coil, slice, H, W]; sizes are hard-coded.
    all_data_npy = np.reshape(all_data_npy,[3,8,18,256,256])

    np.save(f'pred_npy/{args.model}_{args.acceleration}_{args.model_counts}.npy',all_data_npy)
                

if __name__ == '__main__':
    args = get_args()
    # Input directories are keyed by the acceleration factor.
    dir_t1 = Path(f'./data_{args.acceleration}/t1/')
    dir_t2_mask = Path(f'./data_{args.acceleration}/t2_mask/')
    dir_t2 = Path(f'./data_{args.acceleration}/t2/')
    dir_fid = Path(f'./data/t2_fid')
    fid_data = np.load('dataset/fid/t2_all_fid.npy')
    # makedirs (not mkdir) so the intermediate 'pred_img/' directory is
    # created too; exist_ok avoids a crash on re-runs.
    os.makedirs(f'pred_img/{args.model}_{args.acceleration}_{args.model_counts}/', exist_ok=True)
    print(fid_data.shape)
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    # prepare kfolder dataset: only the held-out test ids are evaluated
    train_image_path_list, test_image_path_list = split_dataset()

    val_set = BasicDataset(dir_t1, dir_t2_mask, dir_t2, dir_fid, args.scale)
    val_set.ids = test_image_path_list
    print(test_image_path_list)
    test(val_set,
         device,
         args.model,
         args.batch_size,
         args.acceleration)

    # Reload the predictions test() saved: [sample, coil, slice, H, W]
    img_data = np.load(f'pred_npy/{args.model}_{args.acceleration}_{args.model_counts}.npy')

    # [3,8,18,256,256]
    for sample in range(img_data.shape[0]):
        for slice_idx in range(img_data.shape[2]):  # renamed: 'slice' shadowed the builtin
            # Root-sum-of-squares combination across the coil dimension.
            img = np.zeros([img_data.shape[3], img_data.shape[4]])
            for coil in range(img_data.shape[1]):
                temp = img_data[sample, coil, slice_idx]
                img = img + temp ** 2
            img = np.sqrt(img)
            # Only the first sample is rendered to disk.
            if sample == 0:
                plt.ion()
                plt.figure()
                plt.imshow(img, cmap='gray')
                plt.axis('off')
                plt.savefig(f'pred_img/{args.model}_{args.acceleration}_{args.model_counts}/{sample}__{slice_idx}.svg', bbox_inches='tight', pad_inches=0.0, dpi=300, format='svg')
                # Convert the saved SVG to a high-resolution TIFF.
                img_save = svg2rlg((f'pred_img/{args.model}_{args.acceleration}_{args.model_counts}/{sample}__{slice_idx}.svg'))
                renderPM.drawToFile(img_save, f'pred_img/{args.model}_{args.acceleration}_{args.model_counts}/{sample}__{slice_idx}.tiff', fmt='TIFF', dpi=300)
                plt.ioff()
                # Close the figure; otherwise every slice leaks an open figure.
                plt.close()