import argparse
# from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from dataset.PiFu import PiFu
from dataset.OCT import OCT
import socket
from datetime import datetime
import os
from model.BaseNet import CPFNet, BaseNet_version
from model.unet import UNet
import torch
from torch.utils.tensorboard import SummaryWriter
import tqdm
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from PIL import Image
import logging
import utils.utils as u
import utils.loss as LS
from utils.config import DefaultConfig, train_info
import torch.backends.cudnn as cudnn


def new_test(args, epoch, model, dataloader):
	"""Validate a model whose forward pass returns five outputs.

	Only the first output head is scored with ``u.eval_seg``; the batch
	metrics are accumulated and the batch-averaged (dice, precision,
	jaccard) triple is returned.  The model is put back into train mode
	before returning.
	"""
	total_dice, total_precision, total_jaccard = 0, 0, 0
	num_batches = len(dataloader)  # number of validation batches
	use_cuda = torch.cuda.is_available() and args.use_gpu
	with torch.no_grad():
		model.eval()
		progress = tqdm.tqdm(total=num_batches * args.batch_size_val)
		progress.set_description('val %d' % epoch)
		for batch_idx, (data, labels, boundary) in enumerate(dataloader):
			if use_cuda:
				data, labels = data.cuda(), labels.cuda()
			# the auxiliary heads (mask_d1 and the three trailing outputs) are ignored
			mask_pred, mask_d1, _, _, _ = model(data)
			batch_dice, batch_precision, batch_jaccard = u.eval_seg(mask_pred, labels)
			total_dice += batch_dice
			total_precision += batch_precision
			total_jaccard += batch_jaccard
			
			progress.update(args.batch_size_val)
			# running mean dice over the batches processed so far
			progress.set_postfix(dice='%.6f' % (total_dice / (batch_idx + 1)))
		progress.close()
		print()
	model.train()
	return total_dice / num_batches, total_precision / num_batches, total_jaccard / num_batches


def val(args, epoch, model, dataloader):
	"""Validate a model whose forward pass returns two outputs.

	Scores the main segmentation head with ``u.eval_seg``, accumulates the
	per-batch metrics, and returns the batch-averaged (dice, precision,
	jaccard) triple.  The model is restored to train mode before returning.
	"""
	total_dice, total_precision, total_jaccard = 0, 0, 0
	num_batches = len(dataloader)  # number of validation batches
	use_cuda = torch.cuda.is_available() and args.use_gpu
	with torch.no_grad():
		model.eval()
		progress = tqdm.tqdm(total=num_batches * args.batch_size_val)
		progress.set_description('val %d' % epoch)
		for batch_idx, (data, labels, boundary) in enumerate(dataloader):
			if use_cuda:
				data, labels = data.cuda(), labels.cuda()
			# the edge/auxiliary head (mask_d1) is not evaluated here
			mask_pred, mask_d1 = model(data)
			batch_dice, batch_precision, batch_jaccard = u.eval_seg(mask_pred, labels)
			total_dice += batch_dice
			total_precision += batch_precision
			total_jaccard += batch_jaccard
			
			progress.update(args.batch_size_val)
			# running mean dice over the batches processed so far
			progress.set_postfix(dice='%.6f' % (total_dice / (batch_idx + 1)))
		progress.close()
		print()
	model.train()
	return total_dice / num_batches, total_precision / num_batches, total_jaccard / num_batches


def train(args, model, optimizer, criterion, dataloader_train, dataloader_val, dataloader_test):
	"""Main training loop with periodic validation and checkpointing.

	Per batch, the loss is the sum of BCE (criterion[0]), Dice (criterion[1])
	and IoU (criterion[2]) on the main segmentation head.  Every
	``args.validation_step`` epochs the model is validated; best-so-far
	metrics are tracked on ``args`` as [dice, jaccard, precision, epoch]
	lists (``b_dice`` / ``b_jacd`` / ``b_accu``), everything is logged to
	TensorBoard, and the latest/best checkpoints are saved.

	``dataloader_test`` is currently unused here; it is kept in the
	signature for interface compatibility with callers.
	"""
	current_time = datetime.now().strftime('%b%d_%H-%M-%S')
	print("当前时间", current_time)
	
	log_dir = os.path.join(args.log_dirs, args.BaseNet_version + "_" + str(args.k_fold) + '_' + str(args.lr) + '_' + current_time + '_' + socket.gethostname())
	writer = SummaryWriter(log_dir=log_dir)
	print("log_name: ", log_dir)
	train_info(args, log_dir)  # record the training configuration alongside the logs
	
	step = 0
	best_pred = 0.0
	for epoch in range(args.num_epochs):
		lr = u.adjust_learning_rate(args, optimizer, epoch)
		model.train()
		
		tq = tqdm.tqdm(total=len(dataloader_train) * args.batch_size)
		tq.set_description('epoch %d, lr0 %f,lr1 %f, b_dice %f' % (epoch, lr[0], lr[1], args.b_dice[0]))
		loss_record = []  # per-batch losses; averaged into the epoch loss
		train_loss = 0.0
		for i, (data, label, boundary) in enumerate(dataloader_train):
			if torch.cuda.is_available() and args.use_gpu:
				data = data.cuda()
				label = label.cuda().float()
			
			main_seg, main_edge = model(data)
			
			# hybrid segmentation loss: BCE + Dice + IoU on the main head
			seg_loss_bce = criterion[0](main_seg, label)
			seg_loss_dice = criterion[1](main_seg, label)
			seg_loss_iou = criterion[2](main_seg, label)
	
			loss = seg_loss_bce + seg_loss_dice + seg_loss_iou
		
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
			
			tq.update(args.batch_size)
			train_loss += loss.item()
			tq.set_postfix(loss='%.6f' % (train_loss / (i + 1)))  # running mean loss per batch
			
			# log the step loss every 10 batches; pass .item() so TensorBoard
			# receives a plain float rather than a CUDA tensor that still
			# references the autograd graph (fix: the tensor was logged directly)
			step += 1
			if step % 10 == 0:
				writer.add_scalar('Train/loss_step', loss.item(), step)
			loss_record.append(loss.item())
		
		tq.close()
		loss_train_mean = np.mean(loss_record)
		writer.add_scalar('Train/loss_epoch', float(loss_train_mean), epoch)
		
		writer.add_scalar('lr/lr0', float(lr[0]), epoch)
		writer.add_scalar('lr/lr1', float(lr[1]), epoch)
		if epoch % args.validation_step == 0:
			dice, precsion, jaccard = val(args, epoch, model, dataloader_val)
			# best epoch tracked independently under each metric,
			# stored as [dice, jaccard, precision, epoch]
			if dice > args.b_dice[0]:
				args.b_dice = [dice, jaccard, precsion, epoch]
			if jaccard > args.b_jacd[1]:
				args.b_jacd = [dice, jaccard, precsion, epoch]
			if precsion > args.b_accu[2]:
				args.b_accu = [dice, jaccard, precsion, epoch]
			# current-epoch metrics
			writer.add_scalar('Valid/dice', dice, epoch)
			writer.add_scalar('Valid/jaccard', jaccard, epoch)
			writer.add_scalar('Valid/precsion', precsion, epoch)
			# best-so-far metrics, grouped by which metric defines "best"
			writer.add_scalar('Dice/dice', args.b_dice[0], epoch)
			writer.add_scalar('Dice/jaccard', args.b_dice[1], epoch)
			writer.add_scalar('Dice/precsion', args.b_dice[2], epoch)
			
			writer.add_scalar('Jaccard/dice', args.b_jacd[0], epoch)
			writer.add_scalar('Jaccard/jaccard', args.b_jacd[1], epoch)
			writer.add_scalar('Jaccard/precsion', args.b_jacd[2], epoch)
			
			writer.add_scalar('Precsion/dice', args.b_accu[0], epoch)
			writer.add_scalar('Precsion/jaccard', args.b_accu[1], epoch)
			writer.add_scalar('Precsion/precsion', args.b_accu[2], epoch)
			
			is_best = dice > best_pred
			best_pred = max(best_pred, dice)
			checkpoint_dir = args.save_model_path
			if not os.path.exists(checkpoint_dir):
				os.makedirs(checkpoint_dir)
			checkpoint_latest = os.path.join(checkpoint_dir, 'checkpoint_latest.pth')
			u.save_checkpoint({
				'epoch': epoch + 1,
				'state_dict': model.state_dict(),
				'best_dice': best_pred,
			}, best_pred, epoch, is_best, checkpoint_dir, filename=checkpoint_latest)
	
	print("          Dice                  Ja                Acc")
	print("Best Dice", args.b_dice)
	print("Best Jacd", args.b_jacd)
	print("Best Accu", args.b_accu)
	current_time = datetime.now().strftime('%b%d_%H-%M-%S')
	print("当前时间", current_time)


def test(model, dataloader, args):
	"""Run inference on the test set and save colorized prediction masks.

	Class indices from the network's second output are mapped to RGB via
	``OCT.COLOR_DICT`` and written to disk; each output path is derived from
	the ground-truth path by replacing 'test_mask' with '<cwd>_mask'.
	Leaves the model in eval mode.
	"""
	print('start test!')
	with torch.no_grad():
		model.eval()
		tq = tqdm.tqdm(dataloader, desc='\r')
		tq.set_description('test')
		# current working directory name tags the output mask folder
		comments = os.getcwd().split('/')[-1]
		for i, (data, label_path) in enumerate(tq):
			if torch.cuda.is_available() and args.use_gpu:
				data = data.cuda()
			aux_pred, predict = model(data)
			
			# argmax is invariant under the strictly increasing exp(), so take
			# it directly on the raw outputs (fix: exp() could overflow to inf
			# for large values and corrupt the argmax through ties)
			predict = torch.argmax(predict, dim=1)
			pred = predict.data.cpu().numpy()
			pred_RGB = OCT.COLOR_DICT[pred.astype(np.uint8)]
			
			for index, item in enumerate(label_path):
				save_img_path = item.replace('test_mask', comments + '_mask')
				# exist_ok avoids the check-then-create race of the original
				os.makedirs(os.path.dirname(save_img_path), exist_ok=True)
				img = Image.fromarray(pred_RGB[index].squeeze().astype(np.uint8))
				img.save(save_img_path)
				tq.set_postfix(str=str(save_img_path))
		tq.close()


def main(mode='train', args=None):
	"""Build datasets, dataloaders, model, optimizer and losses, then run the
	requested mode: 'train', 'test', or 'train_test' (train then test).

	Args:
		mode: one of 'train', 'test', 'train_test'.
		args: configuration object (DefaultConfig) with all hyperparameters.
	"""
	# create dataset and dataloader
	dataset_path = os.path.join(args.data, args.dataset)
	print("dataset_path :", dataset_path)
	dataset_train = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='train')
	print("训练集数量大小：", len(dataset_train))
	
	dataloader_train = DataLoader(
		dataset_train,
		batch_size=args.batch_size,
		shuffle=True,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=True  # keep batch shapes uniform for training
	)
	print("训练集一个epoch的batch数量：", len(dataloader_train))
	
	dataset_val = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='val')
	dataloader_val = DataLoader(
		dataset_val,
		batch_size=args.batch_size_val,
		shuffle=False,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=False
	)
	print("验证集数量大小：", len(dataset_val))
	print("验证集一个epoch的batch数量：", len(dataloader_val))
	
	dataset_test = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='test')
	dataloader_test = DataLoader(
		dataset_test,
		batch_size=args.batch_size_test,
		shuffle=False,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=False
	)
	# restrict visible devices before the model is moved to the GPU
	os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
	
	# construct only the selected network lazily (the original dict built
	# every model eagerly, wasting memory and startup time)
	model_constructors = {
		'BaseNet': lambda: CPFNet(out_planes=args.num_classes),
		'UNet': lambda: UNet(in_channels=1, n_classes=args.num_classes),
	}
	model = model_constructors[args.net_work]()
	cudnn.benchmark = True
	
	print("torch.cuda.is_available() :", torch.cuda.is_available(), " Use :", args.cuda)
	if torch.cuda.is_available() and args.use_gpu:
		model = torch.nn.DataParallel(model).cuda()
	
	# load pretrained weights when testing, if a path was provided
	if args.pretrained_model_path and mode == 'test':
		print("=> loading pretrained model '{}'".format(args.pretrained_model_path))
		checkpoint = torch.load(args.pretrained_model_path)
		model.load_state_dict(checkpoint['state_dict'])
		print('Done!')
	optimizer = torch.optim.SGD(model.parameters(), lr=args.lr[0], momentum=args.momentum, weight_decay=args.weight_decay)
	
	# NOTE(review): BCELoss (not BCEWithLogitsLoss) is used, which assumes
	# the network already applies a sigmoid to its output — confirm
	criterion_aux = nn.BCELoss()
	criterion_main = LS.DiceLoss()
	criterion_iou = LS.IOU()
	criterion_msssim = LS.MSSSIM()
	criterion = [criterion_aux, criterion_main, criterion_iou, criterion_msssim]
	
	if mode == 'train':
		train(args, model, optimizer, criterion, dataloader_train, dataloader_val, dataloader_test)
	if mode == 'test':
		test(model, dataloader_test, args)
	if mode == 'train_test':
		# fix: the original call omitted dataloader_test and raised a
		# TypeError as soon as 'train_test' mode was selected
		train(args, model, optimizer, criterion, dataloader_train, dataloader_val, dataloader_test)
		test(model, dataloader_test, args)


if __name__ == '__main__':
	# fixed seed so runs are reproducible across CPU and all GPUs
	seed = 1234
	torch.manual_seed(seed)
	torch.cuda.manual_seed_all(seed)
	args = DefaultConfig()
	logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
	logging.info(f'''Starting training:
         NetWork:         {args.net_work}
         Epochs:          {args.num_epochs}
         Batch size:      {args.batch_size}
         Batch size_val:  {args.batch_size_val}
         Learning rate 0: {args.lr[0]}
         Learning rate 1: {args.lr[1]}
         Val_k_fold:      {args.k_fold}
         Checkpoints:     {args.save_model_path}
         Device cuda:     {args.cuda}
         ''')
	
	requested_mode = args.mode
	args.BaseNet_version = BaseNet_version
	
	# dispatch on the configured mode; any other value is silently ignored,
	# matching the behavior of an if/elif chain with no else branch
	if requested_mode in ('train', 'test', 'train_test'):
		main(mode=requested_mode, args=args)
