import logging
import os
import socket
from datetime import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import tqdm
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import utils.loss as LS
import utils.utils as u
from dataset.PiFu import PiFu
from model.R2U_net import R2U_Net, BaseNet_version
from utils.config import DefaultConfig, train_info



def new_test(args, epoch, model, dataloader):
	"""Evaluate `model` on `dataloader` and return batch-averaged metrics.

	Adapted for single-channel skin-segmentation output.

	Args:
		args: config object; uses `batch_size_val` and `use_gpu`.
		epoch: current epoch index (used only to label the progress bar).
		model: network; only the first of its outputs (the mask) is scored.
		dataloader: loader yielding (data, labels, boundary) batches.

	Returns:
		Tuple (dice, precision, jaccard), each averaged over the number
		of batches.
	"""
	dice = precsion = jaccard = 0
	n_val = len(dataloader)  # the number of batches
	with torch.no_grad():
		model.eval()
		tbar = tqdm.tqdm(total=len(dataloader) * args.batch_size_val)
		tbar.set_description('val %d' % epoch)
		for i, (data, labels, boundary) in enumerate(dataloader):
			if torch.cuda.is_available() and args.use_gpu:
				data = data.cuda()
				labels = labels.cuda()
			# NOTE(review): this helper expects a 5-output model, unlike the
			# 2-output model used in val()/train() — confirm which network
			# variant this function targets.
			mask_pred, mask_d1, _, _, _ = model(data)
			# FIX: u.eval_seg returns 5 values (see val()); the previous
			# 3-value unpacking raised "too many values to unpack".
			# Sensitivity/specificity are discarded to keep the return shape.
			dice_b, precsion_b, jaccard_b, _, _ = u.eval_seg(mask_pred, labels)
			dice = dice + dice_b
			precsion = precsion + precsion_b
			jaccard += jaccard_b
			
			tbar.update(args.batch_size_val)
			tbar.set_postfix(dice='%.6f' % (dice / (i + 1)))  # running mean dice per batch
		tbar.close()
		print()
	model.train()
	return dice / n_val, precsion / n_val, jaccard / n_val


def val(args, epoch, model, dataloader, criterion, writer):
	"""Run one validation pass and return batch-averaged metrics.

	Adapted for single-channel skin-segmentation output. The loss sums
	every criterion except the last (the classification loss).

	Args:
		args: config object; uses `batch_size_val` and `use_gpu`.
		epoch: current epoch index (used only to label the progress bar).
		model: network returning (mask_pred, edge_pred).
		dataloader: loader yielding (data, labels, boundary) batches.
		criterion: list of loss callables; all but the last are applied.
		writer: TensorBoard writer (currently unused here).

	Returns:
		Tuple (dice, precision, jaccard, sensitivity, specificity, loss),
		each averaged over the number of batches.
	"""
	n_val = len(dataloader)  # the number of batches
	dice_sum = prec_sum = jac_sum = sen_sum = spc_sum = 0
	loss_sum = 0
	use_cuda = torch.cuda.is_available() and args.use_gpu
	with torch.no_grad():
		model.eval()
		bar = tqdm.tqdm(total=n_val * args.batch_size_val)
		bar.set_description('val %d' % epoch)
		for batch_idx, (images, masks, edges) in enumerate(dataloader):
			if use_cuda:
				images = images.cuda()
				masks = masks.cuda()
				edges = edges.cuda()
			mask_pred, edge_pred = model(images)
			
			# Accumulate every loss term except the final classification one.
			for loss_fn in criterion[:-1]:
				loss_sum += loss_fn(mask_pred, masks)
			
			d_b, p_b, j_b, se_b, sp_b = u.eval_seg(mask_pred, masks)
			dice_sum += d_b
			prec_sum += p_b
			jac_sum += j_b
			sen_sum += se_b
			spc_sum += sp_b
			
			bar.update(args.batch_size_val)
			bar.set_postfix(dice='%.6f' % (dice_sum / (batch_idx + 1)))  # running mean dice per batch
		bar.close()
		print()
	model.train()
	return (dice_sum / n_val, prec_sum / n_val, jac_sum / n_val,
			sen_sum / n_val, spc_sum / n_val, loss_sum / n_val)


def train(args, model, optimizer, criterion, dataloader_train, dataloader_val, dataloader_test):
	"""Train `model` for `args.num_epochs` epochs with periodic validation.

	criterion is [BCE, Dice, CrossEntropy]; the first two are summed into
	the segmentation loss. Best validation metrics are tracked in the
	`args.b_*` lists, each laid out as [dice, jaccard, precsion, sen, spc,
	epoch]. TensorBoard events go to a timestamped run directory; the
	latest (and best-dice) checkpoints go to `args.save_model_path`.

	Args:
		args: training configuration object.
		model: network returning (main_seg, main_edge).
		optimizer: torch optimizer over model parameters.
		criterion: list of loss callables as described above.
		dataloader_train / dataloader_val: training / validation loaders.
		dataloader_test: accepted for interface compatibility; unused here.
	"""
	current_time = datetime.now().strftime('%b%d_%H-%M-%S')
	print("当前时间", current_time)
	
	log_dir = os.path.join(args.log_dirs, args.BaseNet_version + "_" + str(args.k_fold) + '_' + str(args.lr) + '_' + current_time + '_' + socket.gethostname())
	writer = SummaryWriter(log_dir=log_dir)
	print("log_name: ", log_dir)
	train_info(args, log_dir)  # record the training configuration
	
	step = 0
	best_pred = 0.0
	for epoch in range(args.num_epochs):
		lr = u.adjust_learning_rate(args, optimizer, epoch)
		model.train()
		
		tq = tqdm.tqdm(total=len(dataloader_train) * args.batch_size)
		tq.set_description('epoch %d, lr0 %f b_dice %f' % (epoch, lr[0], args.b_dice[0]))
		loss_record = []  # per-batch losses, averaged into the epoch loss
		train_loss = 0.0
		# Accumulators follow u.eval_seg's return order (see val()):
		# dice, precision, jaccard, sensitivity, specificity
		train_dja = [0, 0, 0, 0, 0]
		for i, (data, label, boundary) in enumerate(dataloader_train):
			if torch.cuda.is_available() and args.use_gpu:
				data = data.cuda()
				label = label.cuda().float()
				boundary = boundary.cuda()
			main_seg, main_edge = model(data)
			
			# Training-set metrics for this batch.
			get_train_dja = u.eval_seg(main_seg, label)
			for k in range(len(train_dja)):
				train_dja[k] += get_train_dja[k]
			
			# Segmentation loss for this batch: BCE + Dice.
			seg_loss_bce = criterion[0](main_seg, label)
			seg_loss_dice = criterion[1](main_seg, label)
			loss = seg_loss_bce + seg_loss_dice
			
			# Back-propagate and update the weights.
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
			# Update the progress bar.
			tq.update(args.batch_size)
			train_loss += loss.item()
			tq.set_postfix(loss='%.6f' % (train_loss / (i + 1)))  # mean loss per batch
			
			# Log the step loss every 10 batches.
			step += 1
			if step % 10 == 0:
				writer.add_scalar('Train/loss_step', loss, step)
			loss_record.append(loss.item())
		tq.close()
		
		# Average the accumulated metrics over the epoch.
		for k in range(len(train_dja)):
			train_dja[k] /= len(dataloader_train)
		
		loss_train_mean = np.mean(loss_record)
		writer.add_scalar('Train/loss_epoch', float(loss_train_mean), epoch)
		# FIX: u.eval_seg returns (dice, precision, jaccard, sen, spc) — see
		# the unpacking in val() — so index 1 is precision/accuracy and index
		# 2 is jaccard. The previous code logged them under swapped tags.
		writer.add_scalar('Train/dice', float(train_dja[0]), epoch)
		writer.add_scalar('Train/jac', float(train_dja[2]), epoch)
		writer.add_scalar('Train/acc', float(train_dja[1]), epoch)
		writer.add_scalar('Train/sen', float(train_dja[3]), epoch)
		writer.add_scalar('Train/spc', float(train_dja[4]), epoch)
		
		if epoch % args.validation_step == 0:
			dice, precsion, jaccard, sen, spc, valoss = val(args, epoch, model, dataloader_val, criterion, writer)
			
			# Track the best epoch under each individual metric; every stored
			# list has the layout [dice, jaccard, precsion, sen, spc, epoch].
			if dice > args.b_dice[0]:
				args.b_dice = [dice, jaccard, precsion, sen, spc, epoch]
			if jaccard > args.b_jacd[1]:
				args.b_jacd = [dice, jaccard, precsion, sen, spc, epoch]
			if precsion > args.b_accu[2]:
				args.b_accu = [dice, jaccard, precsion, sen, spc, epoch]
			if sen > args.b_sen[3]:
				args.b_sen = [dice, jaccard, precsion, sen, spc, epoch]
			if spc > args.b_spc[4]:
				args.b_spc = [dice, jaccard, precsion, sen, spc, epoch]
			
			# Log this epoch's validation metrics.
			writer.add_scalar('Valid/dice', dice, epoch)
			writer.add_scalar('Valid/jaccard', jaccard, epoch)
			writer.add_scalar('Valid/precsion', precsion, epoch)
			writer.add_scalar('Valid/sen', sen, epoch)
			writer.add_scalar('Valid/spc', spc, epoch)
			writer.add_scalar('Valid/valoss', valoss, epoch)
			
			# Checkpoint: always save the latest weights; flag the save as
			# "best" whenever validation dice improves.
			is_best = dice > best_pred
			best_pred = max(best_pred, dice)
			checkpoint_dir = args.save_model_path
			if not os.path.exists(checkpoint_dir):
				os.makedirs(checkpoint_dir)
			checkpoint_latest = os.path.join(checkpoint_dir, 'checkpoint_latest.pth')
			u.save_checkpoint({
				'epoch': epoch + 1,
				'state_dict': model.state_dict(),
				'best_dice': best_pred,
			}, best_pred, epoch, is_best, checkpoint_dir, filename=checkpoint_latest)
	
	writer.close()  # flush pending TensorBoard events
	
	print("   Dice          Ja           Acc          sen           spc")
	print("Best Dice", args.b_dice)
	print("Best Jacd", args.b_jacd)
	print("Best Accu", args.b_accu)
	print("Best sen", args.b_sen)
	print("Best spc", args.b_spc)
	
	current_time = datetime.now().strftime('%b%d_%H-%M-%S')
	print("当前时间", current_time)


def main(mode='train', args=None):
	"""Build datasets, model, optimizer and losses, then dispatch by `mode`.

	mode == 'train' runs the training loop; mode == 'test' only loads the
	pretrained weights (no test loop is implemented here); any other value
	just builds everything and returns.

	Args:
		mode: 'train', 'test', or 'train_test'.
		args: configuration object (DefaultConfig instance); required.
	"""
	# Create datasets and dataloaders.
	dataset_path = os.path.join(args.data, args.dataset)
	print("dataset_path :", dataset_path)
	dataset_train = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='train')
	print("训练集数量大小：", len(dataset_train))
	
	dataloader_train = DataLoader(
		dataset_train,
		batch_size=args.batch_size,
		shuffle=True,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=True
	)
	print("训练集一个epoch的batch数量：", len(dataloader_train))
	
	dataset_val = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='val')
	dataloader_val = DataLoader(
		dataset_val,
		batch_size=args.batch_size_val,
		shuffle=True,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=False
	)
	print("验证集数量大小：", len(dataset_val))
	print("验证集一个epoch的batch数量：", len(dataloader_val))
	
	dataset_test = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='test')
	dataloader_test = DataLoader(
		dataset_test,
		batch_size=args.batch_size_test,
		shuffle=False,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=False
	)
	# Restrict visible GPUs before the model is built.
	os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
	# NOTE(review): a Sync_BN flag used to be computed from len(args.cuda)
	# but was never used anywhere; it has been removed.
	
	# Build the model.
	model = R2U_Net()
	cudnn.benchmark = True
	
	print("torch.cuda.is_available() :", torch.cuda.is_available(), " Use :", args.cuda)
	if torch.cuda.is_available() and args.use_gpu:
		model = torch.nn.DataParallel(model).cuda()
	
	# Load a pretrained model if one is configured (test mode only).
	if args.pretrained_model_path and mode == 'test':
		print("=> loading pretrained model '{}'".format(args.pretrained_model_path))
		# NOTE(review): torch.load without map_location assumes the
		# checkpoint's device is available — confirm for CPU-only machines.
		checkpoint = torch.load(args.pretrained_model_path)
		model.load_state_dict(checkpoint['state_dict'])
		print('Done!')
	
	optimizer = torch.optim.SGD(model.parameters(), lr=args.lr[0], momentum=args.momentum, weight_decay=args.weight_decay)
	
	# Losses: BCE expects sigmoid already applied inside the network.
	criterion_aux = nn.BCELoss()
	criterion_main = LS.DiceLoss()
	criterion_cls_ce = nn.CrossEntropyLoss()
	criterion = [criterion_aux, criterion_main, criterion_cls_ce]
	
	if mode == 'train':
		train(args, model, optimizer, criterion, dataloader_train, dataloader_val, dataloader_test)


if __name__ == '__main__':
	# Fix RNG seeds for reproducibility; cudnn.benchmark trades strict
	# determinism for faster convolution algorithm selection.
	seed = 1234
	torch.manual_seed(seed)
	torch.cuda.manual_seed_all(seed)
	torch.backends.cudnn.benchmark = True
	
	args = DefaultConfig()
	logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
	
	logging.info(f'''Starting training:
	     NetWork:         {args.net_work}
	     Epochs:          {args.num_epochs}
	     Batch size:      {args.batch_size}
	     Batch size_val:  {args.batch_size_val}
	     Learning rate 0: {args.lr[0]}
	     Learning rate 1: {args.lr[1]}
	     Val_k_fold:      {args.k_fold}
	     Checkpoints:     {args.save_model_path}
	     Device cuda:     {args.cuda}
	     ''')
	
	modes = args.mode
	args.BaseNet_version = BaseNet_version
	
	# Every recognised mode simply forwards itself to main().
	if modes in ('train', 'test', 'train_test'):
		main(mode=modes, args=args)
