import os
import numpy as np
import time
import datetime
import torch
import torchvision
from torch import optim
from torch.autograd import Variable
import torch.nn.functional as F
from evaluation import *
from botunet import *
import csv
import cv2
import ShapeMatching
import math
import torch.optim as optim
from networks.vit_seg_modeling import VisionTransformer as ViT_seg
from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg
from networks.vnet import VNet
from torch.nn import BCEWithLogitsLoss, MSELoss, CrossEntropyLoss
from networks.unetpp import NestedUNet
from networks.fcn import get_fcn8s
#from networks.ssl import realU_Net
from networks.discriminator import get_fc_discriminator,get_fc_discriminator2
from utils.func import loss_calc, bce_loss
from utils.util import compute_sdf
from utils import ramps, losses, metrics
from utils.func import prob_2_entropy,prob_2_entropy2
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"

class Solver(object):
	def __init__(self, config, train_loader, valid_loader, test_loader, train_loader_GAN):
		"""Capture data loaders and configuration, then build the network."""
		# Data loaders: labeled train / val / test plus the GAN-target loader.
		self.train_loader = train_loader
		self.valid_loader = valid_loader
		self.test_loader = test_loader
		self.train_loader_GAN = train_loader_GAN

		# Network and optimizer are created by build_model() below.
		self.unet = None
		self.optimizer = None

		# Copy scalar settings straight off the config object.
		for attr in ('img_ch', 'output_ch', 'augmentation_prob',
					 'lr', 'beta1', 'beta2',
					 'num_epochs', 'num_epochs_decay', 'batch_size',
					 'log_step', 'val_step',
					 'model_path', 'result_path', 'mode',
					 'model_type', 't'):
			setattr(self, attr, getattr(config, attr))

		# Binary cross-entropy on sigmoid probabilities.
		self.criterion = torch.nn.BCELoss()

		self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
		self.build_model()

	def build_model(self):
		"""Instantiate the segmentation network, its Adam optimizer, and wrap it
		in DataParallel for multi-GPU execution.

		Raises:
			ValueError: if ``self.model_type`` does not name a known architecture
				(previously an unknown type left ``self.unet`` as None and crashed
				later on ``None.parameters()``).
		"""
		if self.model_type == 'U_Net':
			self.unet = U_Net(img_ch=3, output_ch=1)
		elif self.model_type == 'realU_Net':
			self.unet = realU_Net(img_ch=3, output_ch=1)
		elif self.model_type == 'R2U_Net':
			self.unet = R2U_Net(img_ch=3, output_ch=1, t=self.t)
		elif self.model_type == 'AttU_Net':
			self.unet = AttU_Net(img_ch=3, output_ch=1)
		elif self.model_type == 'R2AttU_Net':
			self.unet = R2AttU_Net(img_ch=3, output_ch=1, t=self.t)
		elif self.model_type == 'TransU_Net':
			config_vit = CONFIGS_ViT_seg['R50-ViT-B_16']
			self.unet = ViT_seg(config_vit, img_size=256, num_classes=1)
		elif self.model_type == 'Vnet':
			self.unet = VNet(elu=False, nll=True)
		elif self.model_type == 'Unetpp':
			# NOTE(review): passing ``self`` as the first positional argument looks
			# unintentional — confirm NestedUNet's constructor signature.
			self.unet = NestedUNet(self, 3, 1)
		elif self.model_type == 'fcn8s':
			self.unet = get_fcn8s(1)
		elif self.model_type == 'GTU_Netv2':
			self.unet = GTU_Netv2(img_ch=3, output_ch=1)
		elif self.model_type == 'GTU_Net_GAN':
			self.unet = GTU_Net_GAN(img_ch=3, output_ch=1)
		elif self.model_type == 'GTU_Netv3':
			self.unet = GTU_Netv3(img_ch=3, output_ch=1)
		elif self.model_type == 'GTU_Netv3_GAN':
			self.unet = GTU_Netv3_GAN(img_ch=3, output_ch=1)
		elif self.model_type == 'GTU_Netv4_GAN':
			self.unet = GTU_Netv4_GAN(img_ch=3, output_ch=1)
		else:
			# Fail fast with a clear message instead of an AttributeError below.
			raise ValueError('Unknown model_type: %s' % self.model_type)

		# Adam over all network parameters; betas come from the config.
		self.optimizer = optim.Adam(self.unet.parameters(),
									self.lr, [self.beta1, self.beta2])

		# Wrap in DataParallel when more than one GPU is requested.
		# NOTE(review): ngpu is hard-coded here rather than configurable.
		ngpu = 2
		if ngpu > 1:
			self.unet = torch.nn.DataParallel(self.unet, device_ids=list(range(ngpu)))

		if ngpu > 0:
			self.unet.to(self.device)

	# self.print_network(self.unet, self.model_type)

	def print_network(self, model, name):
		"""Print out the network information."""
		num_params = 0
		for p in model.parameters():
			num_params += p.numel()
		print(model)
		print(name)
		print("The number of parameters: {}".format(num_params))

	def to_data(self, x):
		"""Convert variable to tensor."""
		if torch.cuda.is_available():
			x = x.cpu()
		return x.data

	def update_lr(self, g_lr, d_lr):
		for param_group in self.optimizer.param_groups:
			param_group['lr'] = lr

	def reset_grad(self):
		"""Zero the gradient buffers."""
		self.unet.zero_grad()

	def compute_accuracy(self,SR,GT):
		SR_flat = SR.view(-1)
		GT_flat = GT.view(-1)

		acc = GT_flat.data.cpu()==(SR_flat.data.cpu()>0.5)

	def tensor2img(self,x):
		img = (x[:,0,:,:]>x[:,1,:,:]).float()
		img = img*255
		return img


	def train(self):
		"""Supervised training loop for the plain (single-output) U-Net variants.

		If a checkpoint named after the current hyper-parameters already exists,
		it is loaded and training is skipped. Otherwise the model is trained with
		BCE on sigmoid probabilities, the learning rate is decayed linearly over
		the final ``num_epochs_decay`` epochs, the best validation (JS + DC)
		weights are checkpointed each epoch, and the best model is re-loaded and
		evaluated on the test set, with a metrics row appended to result.csv.
		"""

		#====================================== Training ===========================================#
		#===========================================================================================#

		# Checkpoint filename encodes model type and the main hyper-parameters.
		unet_path = os.path.join(self.model_path, '%s-%d-%.4f-%d-%.4f.pkl' %(self.model_type,self.num_epochs,self.lr,self.num_epochs_decay,self.augmentation_prob))

		# U-Net Train
		if os.path.isfile(unet_path):
			# Load the pretrained Encoder
			self.unet.load_state_dict(torch.load(unet_path))
			print('%s is Successfully Loaded from %s'%(self.model_type,unet_path))
		else:
			# Train for Encoder
			lr = self.lr
			best_unet_score = 0.

			for epoch in range(self.num_epochs):

				self.unet.train(True)
				epoch_loss = 0

				# Per-epoch metric accumulators (averaged per sample below).
				acc = 0.	# Accuracy
				SE = 0.		# Sensitivity (Recall)
				SP = 0.		# Specificity
				PC = 0. 	# Precision
				F1 = 0.		# F1 Score
				JS = 0.		# Jaccard Similarity
				DC = 0.		# Dice Coefficient
				length = 0

				for i, (images, GT,filename) in enumerate(self.train_loader):
					# GT : Ground Truth

					images = images.to(self.device)
					GT = GT.to(self.device).float()

					# SR : Segmentation Result (raw network output; sigmoid below)
					SR = self.unet(images)
					SR_probs = F.sigmoid(SR)
					SR_flat = SR_probs.view(SR_probs.size(0),-1)

					GT_flat = GT.view(GT.size(0),-1)
					loss = self.criterion(SR_flat,GT_flat)

					# NOTE(review): SR_out is never used, and the metric loop below is
					# fed the raw SR (logits), not SR_probs — confirm whether the
					# probabilities were intended for thresholding and metrics.
					SR_out = (SR > 0.5).float()
					# Save the first sample of each batch (input / prediction / GT);
					# files are overwritten per batch, so one triple survives per epoch.
					torchvision.utils.save_image(images[0].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_train_%d_image.png' % (self.model_type, epoch + 1)))
					torchvision.utils.save_image(SR[0].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_train_%d_SR.png' % (self.model_type, epoch + 1)))
					torchvision.utils.save_image(GT[0].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_train_%d_GT.png' % (self.model_type, epoch + 1)))
					# Accumulate per-sample metrics over the epoch.
					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1

					epoch_loss += loss.item()
					# Backprop + optimize
					self.reset_grad()
					loss.backward()
					self.optimizer.step()



				acc = acc/length
				SE = SE/length
				SP = SP/length
				PC = PC/length
				F1 = F1/length
				JS = JS/length
				DC = DC/length

				# Print the log info (epoch_loss is the summed, not averaged, loss)
				print('Epoch [%d/%d], Loss: %.4f, \n[Training] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f' % (
					  epoch+1, self.num_epochs, \
					  epoch_loss,\
					  acc,SE,SP,PC,F1,JS,DC))



				# Decay learning rate linearly over the last num_epochs_decay epochs
				if (epoch+1) > (self.num_epochs - self.num_epochs_decay):
					lr -= (self.lr / float(self.num_epochs_decay))
					for param_group in self.optimizer.param_groups:
						param_group['lr'] = lr
					print ('Decay learning rate to lr: {}.'.format(lr))


				#===================================== Validation ====================================#
				self.unet.train(False)
				self.unet.eval()

				acc = 0.	# Accuracy
				SE = 0.		# Sensitivity (Recall)
				SP = 0.		# Specificity
				PC = 0. 	# Precision
				F1 = 0.		# F1 Score
				JS = 0.		# Jaccard Similarity
				DC = 0.		# Dice Coefficient
				length=0
				# NOTE(review): no torch.no_grad() here — validation builds autograd
				# graphs and uses extra memory; consider wrapping this loop.
				for i, (images, GT,filename) in enumerate(self.valid_loader):

					images = images.to(self.device)
					GT = GT.to(self.device).float()
					SR = F.sigmoid(self.unet(images))

					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1
						SR[t] = (SR[t] > 0.5).float()
						# NOTE(review): this rebinds the loop variable `filename`; after
						# the first sample it is a str, so `filename[0]` is just its
						# first character and later samples overwrite the same files.
						filename = filename[0].split(".")[0]
						torchvision.utils.save_image(SR[t].data.cpu(),
													 os.path.join(self.result_path,
																  'val_%s_SR.png' % (filename)))
						torchvision.utils.save_image(images[t].data.cpu(),
													 os.path.join(self.result_path,
																  'val_%s_image.png' % (filename)))
						torchvision.utils.save_image(GT[t].data.cpu(),
													 os.path.join(self.result_path,
																  '_val_%s_GT.png' % (filename)))

				acc = acc/length
				SE = SE/length
				SP = SP/length
				PC = PC/length
				F1 = F1/length
				JS = JS/length
				DC = DC/length
				#unet_score = JS + DC
				# Model-selection criterion: Jaccard + Dice.
				unet_score = JS + DC
				print('[Validation] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f'%(acc,SE,SP,PC,F1,JS,DC))


				# Save Best U-Net model
				if unet_score > best_unet_score:
					best_unet_score = unet_score
					best_epoch = epoch
					best_unet = self.unet.state_dict()
					print('Best %s model score : %.4f'%(self.model_type,best_unet_score))
					torch.save(best_unet,unet_path)

			#===================================== Test ====================================#

			# Rebuild from scratch and reload the best checkpoint.
			# NOTE(review): if validation never improved, best_unet / best_epoch are
			# unbound and `del best_unet` / the csv row below raise NameError.
			del self.unet
			del best_unet
			self.build_model()
			self.unet.load_state_dict(torch.load(unet_path))

			self.unet.train(False)
			self.unet.eval()

			acc = 0.	# Accuracy
			SE = 0.		# Sensitivity (Recall)
			SP = 0.		# Specificity
			PC = 0. 	# Precision
			F1 = 0.		# F1 Score
			JS = 0.		# Jaccard Similarity
			DC = 0.		# Dice Coefficient
			length=0
			for i, (images, GT,filename) in enumerate(self.test_loader):

				images = images.to(self.device)
				# NOTE(review): GT is not cast with .float() here, unlike the train
				# and validation loops — confirm the metric helpers accept that.
				GT = GT.to(self.device)
				SR = F.sigmoid(self.unet(images))

				for t in range(len(SR)):
					acc += get_accuracy(SR[t], GT[t])
					SE += get_sensitivity(SR[t], GT[t])
					SP += get_specificity(SR[t], GT[t])
					PC += get_precision(SR[t], GT[t])
					F1 += get_F1(SR[t], GT[t])
					JS += get_JS(SR[t], GT[t])
					DC += get_DC(SR[t], GT[t])
					length += 1

			acc = acc/length
			SE = SE/length
			SP = SP/length
			PC = PC/length
			F1 = F1/length
			JS = JS/length
			DC = DC/length
			unet_score = JS + DC
			print('[test] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f ,unet_score: %.4f' % (
				acc, SE, SP, PC, F1, JS, DC, unet_score))

			# Append a summary row for this run.
			f = open(os.path.join(self.result_path,'result.csv'), 'a', encoding='utf-8', newline='')
			wr = csv.writer(f)
			wr.writerow([self.model_type,acc,SE,SP,PC,F1,JS,DC,unet_score,self.lr,best_epoch,self.num_epochs,self.num_epochs_decay,self.augmentation_prob])
			f.close()



	def test(self):
		"""Evaluate a fixed, pre-trained checkpoint on the test set.

		Loads the hard-coded GTU_Netv4_GAN checkpoint (when present), runs the
		network over the test loader, saves thresholded predictions and ground
		truths as images, prints averaged metrics and appends them to result.csv.
		"""
		# NOTE(review): checkpoint name is hard-coded rather than derived from
		# the current hyper-parameters as in train().
		unet_path = os.path.join(self.model_path, "GTU_Netv4_GAN-200-0.0002-70-0.5000.pkl")
		self.build_model()
		# U-Net Train
		if os.path.isfile(unet_path):
			# Load the pretrained Encoder
			self.unet.load_state_dict(torch.load(unet_path))
			print('%s is Successfully Loaded from %s' % (self.model_type, unet_path))


		#self.unet.load_state_dict(torch.load(unet_path))

		self.unet.train(False)
		self.unet.eval()

		acc = 0.  # Accuracy
		SE = 0.  # Sensitivity (Recall)
		SP = 0.  # Specificity
		PC = 0.  # Precision
		F1 = 0.  # F1 Score
		JS = 0.  # Jaccard Similarity
		DC = 0.  # Dice Coefficient
		length = 0
		for i, (images, GT,filenames) in enumerate(self.test_loader):

			images = images.to(self.device)
			GT = GT.to(self.device).float()
			# The GAN model variants return (aux prediction, main output, d2,
			# d3_1, d4_1); only the main output is scored here.
			pred_src_aux, out,d2,d3_1,d4_1 =self.unet(images)
			SR = F.sigmoid(out)

			for t in range(len(SR)):
				acc += get_accuracy(SR[t], GT[t])
				SE += get_sensitivity(SR[t], GT[t])
				SP += get_specificity(SR[t], GT[t])
				PC += get_precision(SR[t], GT[t])
				F1 += get_F1(SR[t], GT[t])
				JS += get_JS(SR[t], GT[t])
				DC += get_DC(SR[t], GT[t])
				length += 1
				filename=filenames[t].split(".")[0]

				# Save the binarized prediction alongside its ground truth.
				SR[t] = (SR[t] > 0.5).float()
				torchvision.utils.save_image(SR[t].data.cpu(),
											 os.path.join(self.result_path,
														  '%s_SR.png' % (filename)))
				torchvision.utils.save_image(GT[t].data.cpu(),
											 os.path.join(self.result_path,
														  '%s_GT.png' % (filename)))


		acc = acc / length
		SE = SE / length
		SP = SP / length
		PC = PC / length
		F1 = F1 / length
		JS = JS / length
		DC = DC / length
		unet_score = JS + DC
		print('[test] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f ,unet_score: %.4f' % (
			acc, SE, SP, PC, F1, JS, DC, unet_score))

		# Append a summary row for this evaluation run.
		f = open(os.path.join(self.result_path, 'result.csv'), 'a', encoding='utf-8', newline='')
		wr = csv.writer(f)
		wr.writerow(
			[self.model_type, acc, SE, SP, PC, F1, JS, DC,unet_score, self.lr, self.num_epochs, self.num_epochs_decay,
			 self.augmentation_prob])
		f.close()
		del self.unet


	def trainGAN(self):
		"""Domain-adversarial training for the multi-head GAN model variants.

		Per batch (source = labeled images, target = unlabeled images_GAN):
		  1. Supervised pass on source: BCE on the main output, deep-supervision
		     BCE on the three auxiliary heads (d2, d3_1, d4_1), plus an
		     uncertainty-weighted (KL-variance) consistency loss between each
		     head's softmax and the mean prediction.
		  2. Adversarial pass on target: the segmenter tries to make the
		     discriminator d_aux classify its target aux prediction as "source",
		     with the same consistency regularizer.
		  3. Discriminator pass: d_aux is trained on detached source/target aux
		     predictions with labels 0/1.
		Validation, best-checkpoint selection and the final test pass mirror
		train().
		"""

		#====================================== Training ===========================================#
		#===========================================================================================#

		# Checkpoint filename encodes model type and the main hyper-parameters.
		unet_path = os.path.join(self.model_path, '%s-%d-%.4f-%d-%.4f.pkl' %(self.model_type,self.num_epochs,self.lr,self.num_epochs_decay,self.augmentation_prob))

		# U-Net Train
		if os.path.isfile(unet_path):
			# Load the pretrained Encoder
			self.unet.load_state_dict(torch.load(unet_path))
			print('%s is Successfully Loaded from %s'%(self.model_type,unet_path))
		else:
			# Train for Encoder
			lr = self.lr
			best_unet_score = 0.
			# Per-pixel KL divergence (no reduction) used for the uncertainty maps.
			kl_distance = torch.nn.KLDivLoss(reduction='none')
			# Auxiliary-output discriminator and its own Adam optimizer.
			d_aux = get_fc_discriminator(num_classes=1)
			d_aux.train()
			d_aux.to(self.device)
			optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=1e-4,
			                             betas=(0.9, 0.99))


			#os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
			# Single-GPU discriminator; the branch below is inert with ngpu = 1.
			ngpu = 1
			if ngpu > 1:
				d_aux = torch.nn.DataParallel(d_aux, device_ids=list(range(ngpu)))
				#d_main = torch.nn.DataParallel(d_main, device_ids=list(range(ngpu)))


			# Domain labels for the discriminator's BCE loss.
			source_label = 0
			target_label = 1

			for epoch in range(self.num_epochs):

				self.unet.train(True)
				epoch_loss = 0
				epoch_loss1 = 0
				epoch_dux_loss = 0

				# Per-epoch metric accumulators.
				acc = 0.	# Accuracy
				SE = 0.		# Sensitivity (Recall)
				SP = 0.		# Specificity
				PC = 0. 	# Precision
				F1 = 0.		# F1 Score
				JS = 0.		# Jaccard Similarity
				DC = 0.		# Dice Coefficient
				length = 0

				for i, (images, GT,filename,images_GAN) in enumerate(self.train_loader_GAN):
					# GT : Ground Truth
					optimizer_d_aux.zero_grad()
					#optimizer_d_main.zero_grad()
					# Freeze the discriminator while the segmenter is updated.
					for param in d_aux.parameters():
						param.requires_grad = False
					#for param in d_main.parameters():
					#	param.requires_grad = False

					images = images.to(self.device)
					GT = GT.to(self.device).float()
					images_GAN = images_GAN.to(self.device)

					# SR : Segmentation Result ,outputs_src_tanh
					pred_src_aux, SR,d2,d3_1,d4_1 = self.unet(images)
					SR_probs = F.sigmoid(SR)
					SR_flat = SR_probs.view(SR_probs.size(0),-1)

					GT_flat = GT.view(GT.size(0),-1)
					# Supervised BCE on the main output.
					loss_src = self.criterion(SR_flat,GT_flat)

					# Softmax over the channel dim for the main and auxiliary heads.
					# NOTE(review): the heads appear to be single-channel (output_ch=1),
					# in which case softmax over dim=1 is identically 1 — confirm the
					# channel count, otherwise the KL/consistency terms are degenerate.
					outputs_soft = torch.softmax(SR, dim=1)
					outputs_aux1_soft = torch.softmax(d2,dim=1)
					outputs_aux2_soft = torch.softmax(d3_1,dim = 1)
					outputs_aux3_soft = torch.softmax(d4_1,dim =1)

					# Mean prediction over all four heads.
					preds = (outputs_soft + outputs_aux1_soft + outputs_aux2_soft + outputs_aux3_soft) / 4

					# Per-head KL divergence to the mean; exp(-KL) acts as a
					# per-pixel certainty weight (low weight where heads disagree).
					variance_aux0 = torch.sum(kl_distance(
						torch.log(outputs_soft), preds), dim=1, keepdim=True)
					exp_variance_aux0 = torch.exp(-variance_aux0)

					variance_aux1 = torch.sum(kl_distance(
						torch.log(outputs_aux1_soft), preds), dim=1, keepdim=True)
					exp_variance_aux1 = torch.exp(-variance_aux1)

					variance_aux2 = torch.sum(kl_distance(
						torch.log(outputs_aux2_soft), preds), dim=1, keepdim=True)
					exp_variance_aux2 = torch.exp(-variance_aux2)

					variance_aux3 = torch.sum(kl_distance(
						torch.log(outputs_aux3_soft), preds), dim=1, keepdim=True)
					exp_variance_aux3 = torch.exp(-variance_aux3)

					# Certainty-weighted MSE to the mean plus a variance penalty,
					# computed per head.
					consistency_dist_aux0 = (preds - outputs_soft) ** 2
					consistency_loss_aux0 = torch.mean(
						consistency_dist_aux0 * exp_variance_aux0) / (
													torch.mean(exp_variance_aux0) + 1e-8) + torch.mean(
						variance_aux0)

					consistency_dist_aux1 = (preds - outputs_aux1_soft) ** 2
					consistency_loss_aux1 = torch.mean(
						consistency_dist_aux1 * exp_variance_aux1) / (
													torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(
						variance_aux1)

					consistency_dist_aux2 = (
													preds - outputs_aux2_soft) ** 2
					consistency_loss_aux2 = torch.mean(
						consistency_dist_aux2 * exp_variance_aux2) / (
													torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(
						variance_aux2)

					consistency_dist_aux3 = (
													preds - outputs_aux3_soft) ** 2
					consistency_loss_aux3 = torch.mean(
						consistency_dist_aux3 * exp_variance_aux3) / (
													torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(
						variance_aux3)

					consistency_loss = (consistency_loss_aux0 + consistency_loss_aux1 + consistency_loss_aux2 + consistency_loss_aux3) / 4

					# Deep supervision: BCE of each auxiliary head against the GT.
					outputs_aux1_soft = outputs_aux1_soft.view(outputs_aux1_soft.size(0), -1)
					outputs_aux2_soft = outputs_aux2_soft.view(outputs_aux2_soft.size(0), -1)
					outputs_aux3_soft = outputs_aux3_soft.view(outputs_aux3_soft.size(0), -1)
					loss_ce_aux1 = self.criterion(outputs_aux1_soft, GT_flat)
					loss_ce_aux2 = self.criterion(outputs_aux2_soft, GT_flat)
					loss_ce_aux3 = self.criterion(outputs_aux3_soft, GT_flat)
					ce_loss = (loss_ce_aux1 + loss_ce_aux2 + loss_ce_aux3) / 3

					# Total supervised (source-domain) objective.
					loss = loss_src + 0.1 * ce_loss + 0.01 * consistency_loss


					# NOTE(review): metrics below are computed on raw SR logits,
					# not on SR_probs — confirm the metric helpers expect that.
					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1

					epoch_loss += loss.item()
					# Backprop + optimize
					self.reset_grad()
					loss.backward()


					# train on target
					# adversarial training ot fool the discriminator, outputs_trg_tanh

					pred_trg_aux ,SR, d2,d3_1,d4_1 = self.unet(images_GAN)
					d_out_aux = d_aux(pred_trg_aux)
					# Generator loss: push the target prediction to look "source".
					loss_adv_trg_aux = bce_loss(d_out_aux, source_label)

					# Same uncertainty-weighted consistency regularizer, now on the
					# target-domain predictions.
					outputs_soft = torch.softmax(SR, dim=1)
					outputs_aux1_soft = torch.softmax(d2, dim=1)
					outputs_aux2_soft = torch.softmax(d3_1, dim=1)
					outputs_aux3_soft = torch.softmax(d4_1, dim=1)
					preds = (outputs_soft + outputs_aux1_soft + outputs_aux2_soft + outputs_aux3_soft)/4

					variance_aux0 = torch.sum(kl_distance(
						torch.log(outputs_soft), preds), dim=1, keepdim=True)
					exp_variance_aux0 = torch.exp(-variance_aux0)

					variance_aux1 = torch.sum(kl_distance(
						torch.log(outputs_aux1_soft), preds), dim=1, keepdim=True)
					exp_variance_aux1 = torch.exp(-variance_aux1)

					variance_aux2 = torch.sum(kl_distance(
						torch.log(outputs_aux2_soft), preds), dim=1, keepdim=True)
					exp_variance_aux2 = torch.exp(-variance_aux2)

					variance_aux3 = torch.sum(kl_distance(
						torch.log(outputs_aux3_soft), preds), dim=1, keepdim=True)
					exp_variance_aux3 = torch.exp(-variance_aux3)

					consistency_dist_aux0 = (preds - outputs_soft) ** 2
					consistency_loss_aux0 = torch.mean(
						consistency_dist_aux0 * exp_variance_aux0) / (
													torch.mean(exp_variance_aux0) + 1e-8) + torch.mean(
						variance_aux0)

					consistency_dist_aux1 = (preds - outputs_aux1_soft) ** 2
					consistency_loss_aux1 = torch.mean(
						consistency_dist_aux1 * exp_variance_aux1) / (
														torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(
						variance_aux1)

					consistency_dist_aux2 = (
													preds - outputs_aux2_soft) ** 2
					consistency_loss_aux2 = torch.mean(
						consistency_dist_aux2 * exp_variance_aux2) / (
														torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(
						variance_aux2)

					consistency_dist_aux3 = (
													preds - outputs_aux3_soft) ** 2
					consistency_loss_aux3 = torch.mean(
						consistency_dist_aux3 * exp_variance_aux3) / (
														torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(
						variance_aux3)

					consistency_loss = (consistency_loss_aux0 + consistency_loss_aux1 + consistency_loss_aux2 + consistency_loss_aux3) / 4


					# Adversarial (target-domain) objective for the segmenter.
					loss1 = 0.01 * loss_adv_trg_aux + 0.001 * consistency_loss

					loss1.backward()
					# NOTE(review): accumulates the tensor itself rather than
					# loss1.item(); consider .item() to avoid holding tensors.
					epoch_loss1 += loss1



					# Unfreeze the discriminator for its own update.
					for param in d_aux.parameters():
						param.requires_grad = True


					# Discriminator on detached source prediction, label "source".
					pred_src_aux = pred_src_aux.detach()
					d_out_aux = d_aux(pred_src_aux)
					loss_d_aux = bce_loss(d_out_aux, source_label)
					loss_d_aux = loss_d_aux / 2
					loss_d_aux.backward()
					epoch_dux_loss+=loss_d_aux



#------------------------------------------------------------------------------

					# Discriminator on detached target prediction, label "target".
					pred_trg_aux = pred_trg_aux.detach()
					d_out_aux = d_aux(pred_trg_aux)
					loss_d_aux = bce_loss(d_out_aux, target_label)
					loss_d_aux = loss_d_aux / 2
					loss_d_aux.backward()
					epoch_dux_loss+=loss_d_aux



					# Apply all accumulated gradients (segmenter + discriminator).
					self.optimizer.step()
					optimizer_d_aux.step()
					#optimizer_d_main.step()

				acc = acc/length
				SE = SE/length
				SP = SP/length
				PC = PC/length
				F1 = F1/length
				JS = JS/length
				DC = DC/length

				# Print the log info
				print('Epoch [%d/%d], Loss: %.4f, Loss1: %.4f, duxLoss:  %.4f  \n[Training] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f' % (
					  epoch+1, self.num_epochs, \
					  epoch_loss,epoch_loss1,epoch_dux_loss,\
					  acc,SE,SP,PC,F1,JS,DC))


				# Decay learning rate linearly over the last num_epochs_decay epochs
				if (epoch+1) > (self.num_epochs - self.num_epochs_decay):
					lr -= (self.lr / float(self.num_epochs_decay))
					for param_group in self.optimizer.param_groups:
						param_group['lr'] = lr
					print ('Decay learning rate to lr: {}.'.format(lr))


				#===================================== Validation ====================================#
				self.unet.train(False)
				self.unet.eval()

				acc = 0.	# Accuracy
				SE = 0.		# Sensitivity (Recall)
				SP = 0.		# Specificity
				PC = 0. 	# Precision
				F1 = 0.		# F1 Score
				JS = 0.		# Jaccard Similarity
				DC = 0.		# Dice Coefficient
				length=0
				for i, (images, GT,filename) in enumerate(self.valid_loader):

					images = images.to(self.device)
					GT = GT.to(self.device).float()
					# Index [1] selects the main output of the multi-head model.
					SR = F.sigmoid(self.unet(images)[1])

					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1
						# NOTE(review): the thresholded SR[t] and rebound filename are
						# never used afterwards in this loop (no images saved here).
						SR[t] = (SR[t] > 0.5).float()
						filename = filename[0].split(".")[0]

				acc = acc/length
				SE = SE/length
				SP = SP/length
				PC = PC/length
				F1 = F1/length
				JS = JS/length
				DC = DC/length
				#unet_score = JS + DC
				# Model-selection criterion: Jaccard + Dice.
				unet_score = JS + DC
				print('[Validation] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f'%(acc,SE,SP,PC,F1,JS,DC))


				# Save Best U-Net model
				if unet_score > best_unet_score:
					best_unet_score = unet_score
					best_epoch = epoch
					best_unet = self.unet.state_dict()
					print('Best %s model score : %.4f'%(self.model_type,best_unet_score))
					torch.save(best_unet,unet_path)

			#===================================== Test ====================================#

			# Rebuild and reload the best checkpoint for the final test pass.
			del self.unet
			del best_unet
			self.build_model()
			self.unet.load_state_dict(torch.load(unet_path))

			self.unet.train(False)
			self.unet.eval()

			acc = 0.  # Accuracy
			SE = 0.  # Sensitivity (Recall)
			SP = 0.  # Specificity
			PC = 0.  # Precision
			F1 = 0.  # F1 Score
			JS = 0.  # Jaccard Similarity
			DC = 0.  # Dice Coefficient
			length = 0
			for i, (images, GT,filenames) in enumerate(self.test_loader):

				images = images.to(self.device)
				GT = GT.to(self.device).float()
				# Main output only.
				out=self.unet(images)[1]
				SR = F.sigmoid(out)

				for t in range(len(SR)):
					acc += get_accuracy(SR[t], GT[t])
					SE += get_sensitivity(SR[t], GT[t])
					SP += get_specificity(SR[t], GT[t])
					PC += get_precision(SR[t], GT[t])
					F1 += get_F1(SR[t], GT[t])
					JS += get_JS(SR[t], GT[t])
					DC += get_DC(SR[t], GT[t])
					length += 1
					filename=filenames[t].split(".")[0]

					# Save the binarized prediction alongside its ground truth.
					SR[t] = (SR[t] > 0.5).float()
					torchvision.utils.save_image(SR[t].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_SR.png' % (filename)))
					torchvision.utils.save_image(GT[t].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_GT.png' % (filename)))


			acc = acc / length
			SE = SE / length
			SP = SP / length
			PC = PC / length
			F1 = F1 / length
			JS = JS / length
			DC = DC / length
			unet_score = JS + DC
			print('[test] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f ,unet_score: %.4f' % (
				acc, SE, SP, PC, F1, JS, DC, unet_score))

			# Append a summary row for this run.
			f = open(os.path.join(self.result_path, 'result.csv'), 'a', encoding='utf-8', newline='')
			wr = csv.writer(f)
			wr.writerow(
				[self.model_type, acc, SE, SP, PC, F1, JS, DC,unet_score, self.lr, self.num_epochs, self.num_epochs_decay,
				 self.augmentation_prob])
			f.close()
			del self.unet

	def trainGANv0(self):
		"""Earlier, simpler variant of trainGAN for two-output GAN models.

		The network here returns only ``(pred_aux, SR)``. Per batch: supervised
		BCE on the source images, an adversarial term on the target images
		(fool the aux discriminator d_aux into labeling target predictions as
		"source"), and a discriminator update on detached source/target
		predictions. The SDF regression and dual-entropy discriminator branches
		are disabled (kept as commented-out code). Validation, checkpointing and
		the final test pass mirror trainGAN().
		"""

		# ====================================== Training ===========================================#
		# ===========================================================================================#

		# Checkpoint filename encodes model type and the main hyper-parameters.
		unet_path = os.path.join(self.model_path, '%s-%d-%.4f-%d-%.4f.pkl' % (
		self.model_type, self.num_epochs, self.lr, self.num_epochs_decay, self.augmentation_prob))

		# U-Net Train
		if os.path.isfile(unet_path):
			# Load the pretrained Encoder
			self.unet.load_state_dict(torch.load(unet_path))
			print('%s is Successfully Loaded from %s' % (self.model_type, unet_path))
		else:
			# Train for Encoder
			lr = self.lr
			best_unet_score = 0.
			# NOTE(review): mse_loss is only used by the disabled SDF branch below.
			mse_loss = MSELoss()
			# Auxiliary-output discriminator and its own Adam optimizer.
			d_aux = get_fc_discriminator(num_classes=1)
			d_aux.train()
			d_aux.to(self.device)
			optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=1e-4,
										 betas=(0.9, 0.99))

			'''d_main = get_fc_discriminator2(num_classes=1)
			d_main.train()
			d_main.to(self.device)
			optimizer_d_main = optim.Adam(d_main.parameters(), lr=1e-4,betas=(0.9, 0.99))'''

			# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
			# Single-GPU discriminator; the branch below is inert with ngpu = 1.
			ngpu = 1
			if ngpu > 1:
				d_aux = torch.nn.DataParallel(d_aux, device_ids=list(range(ngpu)))
			# d_main = torch.nn.DataParallel(d_main, device_ids=list(range(ngpu)))

			# Domain labels for the discriminator's BCE loss.
			source_label = 0
			target_label = 1

			for epoch in range(self.num_epochs):

				self.unet.train(True)
				epoch_loss = 0
				epoch_loss1 = 0
				epoch_dux_loss = 0

				# Per-epoch metric accumulators.
				acc = 0.  # Accuracy
				SE = 0.  # Sensitivity (Recall)
				SP = 0.  # Specificity
				PC = 0.  # Precision
				F1 = 0.  # F1 Score
				JS = 0.  # Jaccard Similarity
				DC = 0.  # Dice Coefficient
				length = 0

				for i, (images, GT, filename, images_GAN) in enumerate(self.train_loader_GAN):
					# GT : Ground Truth
					optimizer_d_aux.zero_grad()
					# optimizer_d_main.zero_grad()
					# Freeze the discriminator while the segmenter is updated.
					for param in d_aux.parameters():
						param.requires_grad = False
					# for param in d_main.parameters():
					#	param.requires_grad = False

					images = images.to(self.device)
					GT = GT.to(self.device).float()
					images_GAN = images_GAN.to(self.device)

					# SR : Segmentation Result ,outputs_src_tanh
					pred_src_aux, SR = self.unet(images)
					SR_probs = F.sigmoid(SR)
					SR_flat = SR_probs.view(SR_probs.size(0), -1)

					GT_flat = GT.view(GT.size(0), -1)
					# Supervised BCE on the main output.
					loss_src = self.criterion(SR_flat, GT_flat)

					'''label_batch = GT  # .unsqueeze(1)
					with torch.no_grad():
						gt_dis = compute_sdf(label_batch.cpu().numpy(), SR_probs.shape)
						gt_dis_src = torch.from_numpy(gt_dis).float().cuda()

					loss_sdf = mse_loss(outputs_src_tanh, gt_dis_src)'''

					loss = loss_src  # + 0.3 * loss_sdf

					'''dis_to_mask_src = torch.sigmoid(-1500 * outputs_src_tanh)
					pred_entropy = prob_2_entropy2(SR_probs)
					dis_entropy = prob_2_entropy2(F.softmax(dis_to_mask_src))
					src_entropy = torch.cat((pred_entropy, dis_entropy), 1)
					src_entropy = prob_2_entropy2(SR_probs)'''

					# NOTE(review): metrics below are computed on raw SR logits,
					# not on SR_probs — confirm the metric helpers expect that.
					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1

					epoch_loss += loss.item()
					# Backprop + optimize
					self.reset_grad()
					loss.backward()

					# train on target
					# adversarial training ot fool the discriminator, outputs_trg_tanh

					pred_trg_aux, SR = self.unet(images_GAN)
					d_out_aux = d_aux(pred_trg_aux)
					# Generator loss: push the target prediction to look "source".
					loss_adv_trg_aux = bce_loss(d_out_aux, source_label)

					'''pred_trg_main = F.sigmoid(SR)
					dis_to_mask_trg = torch.sigmoid(-1500 * outputs_trg_tanh)
					pred_entropy = prob_2_entropy2(pred_trg_main)
					dis_entropy = prob_2_entropy2(F.softmax(dis_to_mask_trg))
					trg_entropy = torch.cat((pred_entropy, dis_entropy), 1)
					trg_entropy = prob_2_entropy2(F.sigmoid(SR))
					d_out_dual = d_main(trg_entropy)
					loss_adv_trg_dual = bce_loss(d_out_dual, source_label)'''

					loss1 = 0.01 * loss_adv_trg_aux  # + 0.01 * loss_adv_trg_dual

					loss1.backward()
					# NOTE(review): accumulates the tensor itself rather than
					# loss1.item(); consider .item() to avoid holding tensors.
					epoch_loss1 += loss1

					# Unfreeze the discriminator for its own update.
					for param in d_aux.parameters():
						param.requires_grad = True
					# for param in d_main.parameters():
					#	param.requires_grad = True

					# Discriminator on detached source prediction, label "source".
					pred_src_aux = pred_src_aux.detach()
					d_out_aux = d_aux(pred_src_aux)
					loss_d_aux = bce_loss(d_out_aux, source_label)
					loss_d_aux = loss_d_aux / 2
					loss_d_aux.backward()
					epoch_dux_loss += loss_d_aux

					'''src_entropy = src_entropy.detach()
					d_out_main = d_main(src_entropy)
					loss_d_main = bce_loss(d_out_main, source_label)
					loss_d_main = loss_d_main / 2
					loss_d_main.backward()'''

					# ------------------------------------------------------------------------------

					# Discriminator on detached target prediction, label "target".
					pred_trg_aux = pred_trg_aux.detach()
					d_out_aux = d_aux(pred_trg_aux)
					loss_d_aux = bce_loss(d_out_aux, target_label)
					loss_d_aux = loss_d_aux / 2
					loss_d_aux.backward()
					epoch_dux_loss += loss_d_aux

					'''trg_entropy = trg_entropy.detach()
					d_out_main = d_main(trg_entropy)
					loss_d_main = bce_loss(d_out_main, source_label)
					loss_d_main = loss_d_main / 2
					loss_d_main.backward()'''

					# Apply all accumulated gradients (segmenter + discriminator).
					self.optimizer.step()
					optimizer_d_aux.step()
				# optimizer_d_main.step()

				acc = acc / length
				SE = SE / length
				SP = SP / length
				PC = PC / length
				F1 = F1 / length
				JS = JS / length
				DC = DC / length

				# Print the log info
				print(
					'Epoch [%d/%d], Loss: %.4f, Loss1: %.4f, duxLoss:  %.4f  \n[Training] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f' % (
						epoch + 1, self.num_epochs, \
						epoch_loss, epoch_loss1, epoch_dux_loss, \
						acc, SE, SP, PC, F1, JS, DC))

				# Decay learning rate linearly over the last num_epochs_decay epochs
				if (epoch + 1) > (self.num_epochs - self.num_epochs_decay):
					lr -= (self.lr / float(self.num_epochs_decay))
					for param_group in self.optimizer.param_groups:
						param_group['lr'] = lr
					print('Decay learning rate to lr: {}.'.format(lr))

				# ===================================== Validation ====================================#
				self.unet.train(False)
				self.unet.eval()

				acc = 0.  # Accuracy
				SE = 0.  # Sensitivity (Recall)
				SP = 0.  # Specificity
				PC = 0.  # Precision
				F1 = 0.  # F1 Score
				JS = 0.  # Jaccard Similarity
				DC = 0.  # Dice Coefficient
				length = 0
				for i, (images, GT, filename) in enumerate(self.valid_loader):

					images = images.to(self.device)
					GT = GT.to(self.device).float()
					# Index [1] selects the main output of the two-output model.
					SR = F.sigmoid(self.unet(images)[1])

					for t in range(len(SR)):
						acc += get_accuracy(SR[t], GT[t])
						SE += get_sensitivity(SR[t], GT[t])
						SP += get_specificity(SR[t], GT[t])
						PC += get_precision(SR[t], GT[t])
						F1 += get_F1(SR[t], GT[t])
						JS += get_JS(SR[t], GT[t])
						DC += get_DC(SR[t], GT[t])
						length += 1
						# NOTE(review): the thresholded SR[t] and rebound filename are
						# never used afterwards in this loop (no images saved here).
						SR[t] = (SR[t] > 0.5).float()
						filename = filename[0].split(".")[0]

				acc = acc / length
				SE = SE / length
				SP = SP / length
				PC = PC / length
				F1 = F1 / length
				JS = JS / length
				DC = DC / length
				# unet_score = JS + DC
				# Model-selection criterion: Jaccard + Dice.
				unet_score = JS + DC
				print('[Validation] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f' % (
				acc, SE, SP, PC, F1, JS, DC))

				# Save Best U-Net model
				if unet_score > best_unet_score:
					best_unet_score = unet_score
					best_epoch = epoch
					best_unet = self.unet.state_dict()
					print('Best %s model score : %.4f' % (self.model_type, best_unet_score))
					torch.save(best_unet, unet_path)

			# ===================================== Test ====================================#

			# Rebuild and reload the best checkpoint for the final test pass.
			del self.unet
			del best_unet
			self.build_model()
			self.unet.load_state_dict(torch.load(unet_path))

			self.unet.train(False)
			self.unet.eval()

			acc = 0.  # Accuracy
			SE = 0.  # Sensitivity (Recall)
			SP = 0.  # Specificity
			PC = 0.  # Precision
			F1 = 0.  # F1 Score
			JS = 0.  # Jaccard Similarity
			DC = 0.  # Dice Coefficient
			length = 0
			for i, (images, GT, filenames) in enumerate(self.test_loader):

				images = images.to(self.device)
				GT = GT.to(self.device).float()
				# Main output only.
				out = self.unet(images)[1]
				SR = F.sigmoid(out)

				for t in range(len(SR)):
					acc += get_accuracy(SR[t], GT[t])
					SE += get_sensitivity(SR[t], GT[t])
					SP += get_specificity(SR[t], GT[t])
					PC += get_precision(SR[t], GT[t])
					F1 += get_F1(SR[t], GT[t])
					JS += get_JS(SR[t], GT[t])
					DC += get_DC(SR[t], GT[t])
					length += 1
					filename = filenames[t].split(".")[0]

					# Save the binarized prediction alongside its ground truth.
					SR[t] = (SR[t] > 0.5).float()
					torchvision.utils.save_image(SR[t].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_SR.png' % (filename)))
					torchvision.utils.save_image(GT[t].data.cpu(),
												 os.path.join(self.result_path,
															  '%s_GT.png' % (filename)))

			acc = acc / length
			SE = SE / length
			SP = SP / length
			PC = PC / length
			F1 = F1 / length
			JS = JS / length
			DC = DC / length
			unet_score = JS + DC
			print('[test] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f ,unet_score: %.4f' % (
				acc, SE, SP, PC, F1, JS, DC, unet_score))

			# Append a summary row for this run.
			f = open(os.path.join(self.result_path, 'result.csv'), 'a', encoding='utf-8', newline='')
			wr = csv.writer(f)
			wr.writerow(
				[self.model_type, acc, SE, SP, PC, F1, JS, DC, unet_score, self.lr, self.num_epochs,
				 self.num_epochs_decay,
				 self.augmentation_prob])
			f.close()
			del self.unet