'''
Author: zhanwei xu
Date: 2022-09-18 13:28:55
LastEditors: xu-zhanwei xzw14@tsinghua.org.cn
LastEditTime: 2022-10-02 20:47:09
Description: 

Copyright (c) 2022 by zhanwei xu, Tsinghua University, All Rights Reserved. 
'''
import sys
sys.path.append("../")
from config import args
import torch
import torch.nn.functional as F
import numpy as np
from torch.optim import Adam
from voxelmorph import U_Network,SpatialTransformer
from loss import gradient_loss,mse_loss,ncc_loss
import pathlib
import cv2
from dataset import myDataset
from tqdm import tqdm
def train():
    """Train the registration U-Net to predict deformation fields.

    All hyper-parameters come from the global ``args`` object imported from
    ``config``: ``gpu``, ``model``, ``lr``, ``sim_loss``, ``root_dir``,
    ``batch_size``, ``n_iter``, ``alpha``, ``n_save_iter``, ``model_dir``.

    Each epoch runs a training pass over the 'train' split and an evaluation
    pass over the 'valid' split, then saves UNet/STN checkpoints every
    ``args.n_save_iter`` epochs under ``args.model_dir``.
    """
    device = torch.device(f'cuda:{args.gpu}' if torch.cuda.is_available() else 'cpu')

    # Build the registration network (U-Net) and the spatial transformer (STN).
    num_block = [2, 2, 2, 2]
    if args.model == "vm1":
        nf_dec = [128, 64, 32, 32, 8, 8]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16]
    UNet = U_Network(num_block, nf_dec).to(device)
    STN = SpatialTransformer().to(device)

    # Optimizer and losses.  Only the UNet's parameters are optimized; the
    # similarity loss is selected by config, the gradient loss regularizes
    # the smoothness of the predicted deformation field.
    opt = Adam(UNet.parameters(), lr=args.lr)
    sim_loss_fn = ncc_loss if args.sim_loss == "ncc" else mse_loss
    grad_loss_fn = gradient_loss

    # Data loaders for the train/valid splits.
    train_dataset = myDataset(args.root_dir, 'train')
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=12)
    valid_dataset = myDataset(args.root_dir, 'valid')
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=args.batch_size, shuffle=True, num_workers=12)

    ckpt_dir = pathlib.Path(args.model_dir)

    for epoch in range(args.n_iter):
        # ---- training pass ----
        UNet.train()
        STN.train()
        bar = tqdm(train_loader)
        for batch_idx, (sample_id, img, deformation_label, mask) in enumerate(bar):
            img = img.to(device)
            deformation_label = deformation_label.to(device)
            mask = mask.to(device)

            # Forward: the UNet predicts the deformation field directly
            # from the input image (supervised by deformation_label).
            deformation = UNet(img)

            # NOTE(review): sim_loss_fn is called as (label, prediction, mask);
            # confirm both ncc_loss and mse_loss accept the mask argument.
            sim_loss = sim_loss_fn(deformation_label, deformation, mask)
            grad_loss = grad_loss_fn(deformation)  # smoothness regularizer
            loss = sim_loss + args.alpha * grad_loss

            opt.zero_grad()
            loss.backward()
            opt.step()

            bar.set_description(
                'Train Epoch: {} [{:04d}/{:04d} ({:.0f}%)]Loss: {:.6f},{:.2f},{:.2f}'.format(
                    epoch, batch_idx * len(img), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item(), sim_loss.item(), grad_loss.item()))

        # ---- validation pass (no gradients) ----
        UNet.eval()
        STN.eval()
        with torch.no_grad():
            bar = tqdm(valid_loader)
            for batch_idx, (sample_id, img, deformation_label, mask) in enumerate(bar):
                img = img.to(device)
                deformation_label = deformation_label.to(device)
                mask = mask.to(device)

                deformation = UNet(img)

                sim_loss = sim_loss_fn(deformation_label, deformation, mask)
                grad_loss = grad_loss_fn(deformation)
                loss = sim_loss + args.alpha * grad_loss

                bar.set_description(
                    'Valid Epoch: {} [{:04d}/{:04d} ({:.0f}%)]Loss: {:.6f}'.format(
                        epoch, batch_idx * len(img), len(valid_loader.dataset),
                        100. * batch_idx / len(valid_loader), loss.item()))

        # ---- checkpointing ----
        if epoch % args.n_save_iter == 0:
            ckpt_dir.mkdir(parents=True, exist_ok=True)
            torch.save(UNet.state_dict(), ckpt_dir / f'UNet_{epoch}.pth')
            torch.save(STN.state_dict(), ckpt_dir / f'STN_{epoch}.pth')





