# Train the D2BeGan

from __future__ import print_function
import os
import scipy.io as scio
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
from scipy.misc import imsave
import scipy.ndimage
from collections import deque
from tqdm import trange

from Generator import Generator
from Discriminator import DiscriminatorCNN
from LOSS import SSIM_LOSS, L1_LOSS, Fro_LOSS, _tf_fspecial_gauss
# from generate import generate
from utils import *


# Side length of the square training patches fed to the networks.
patch_size = 64
# TRAINING_IMAGE_SHAPE = (patch_size, patch_size, 2)  # (height, width, color_channels)

LEARNING_RATE = 0.0002
EPSILON = 1e-5
DECAY_RATE = 0.91
eps = 1e-8
MAX_LR, MIN_LR = 2e-4, 5e-5


# Hyper-parameters; placeholders here, overwritten per run by many_train()
# from model_param.json before train() is called.
MODEL_NAME = "0"
a = 0
b = 0
k_vis = 0
k_ir = 0
LAMD = 0
GAM = 0
# set some params
# set some params
def many_train(sources, MODEL_SAVE_PATH, EPOCHES, BATCH_SIZE, logging_period):
	"""Run train() once for every parameter set listed in model_param.json.

	Each JSON entry supplies the model name and the hyper-parameters
	(a, b, k_vi, k_ir, lamd, gam), which are published to the module-level
	globals consumed by train().

	Args:
		sources: training image array passed straight through to train().
		MODEL_SAVE_PATH: checkpoint directory prefix passed to train().
		EPOCHES: number of epochs per run.
		BATCH_SIZE: mini-batch size per run.
		logging_period: summary/checkpoint interval passed to train().
	"""
	global MODEL_NAME, a, b, k_vis, k_ir, LAMD, GAM
	import json
	# Read the whole parameter list up front so the file handle is closed
	# before the (potentially very long) training runs start; the original
	# kept model_param.json open for the entire loop.
	with open("model_param.json", "r", encoding="utf8") as f:
		params = json.load(f)  # idiomatic: json.load(f) over json.loads(f.read())
	for p in params:
		MODEL_NAME = str(p["model"])
		a, b = float(p["a"]), float(p["b"])
		k_vis, k_ir = float(p["k_vi"]), float(p["k_ir"])
		LAMD, GAM = float(p["lamd"]), float(p["gam"])

		train(sources, MODEL_SAVE_PATH, EPOCHES, BATCH_SIZE, logging_period)


# Shared logging helper used throughout train() below.
# NOTE(review): presumably WPrint mirrors output to a log file as well as
# stdout and exposes a .finished() marker — confirm against utils.WPrint.
from utils import WPrint
w_print = WPrint()

def train(source_imgs, save_path, EPOCHES_set, BATCH_SIZE, logging_period = 1):
	"""Train the dual-discriminator BEGAN-style image-fusion network.

	Args:
		source_imgs: 4-D array of training patches; channel 0 is the visible
			image, channel 1 the infrared image (assumes shape
			(N, patch_size, patch_size, 2) — TODO confirm against caller).
		save_path: directory prefix under which checkpoints are written.
		EPOCHES_set: number of training epochs.
		BATCH_SIZE: mini-batch size.
		logging_period: write a summary, decay learning rates and save a
			checkpoint every this many steps.
	"""
	from datetime import datetime
	start_time = datetime.now()
	EPOCHS = EPOCHES_set
	w_print('Epoches: %d, Batch_size: %d' % (EPOCHS, BATCH_SIZE))
	# Above: log epoch count and batch size.

	num_imgs = source_imgs.shape[0]		# number of training images
	mod = num_imgs % BATCH_SIZE
	n_batches = int(num_imgs // BATCH_SIZE)
	w_print('Train images number %d, Batches: %d.\n' % (num_imgs, n_batches))

	if mod > 0:
		w_print('Train set has been trimmed %d samples...\n' % mod)
		source_imgs = source_imgs[:-mod]	# drop the tail that cannot fill a whole batch

	# Data preparation done above.
	###---------------- newly added -------------
	os.environ["CUDA_VISIBLE_DEVICES"] = '1'
	# Allocate GPU memory at a fixed fraction.
	config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
	# The line below caps GPU memory use at 50% of what is available (80% was also tried).
	config.gpu_options.per_process_gpu_memory_fraction = 0.5
	config.gpu_options.allow_growth = True

	# create the graph
	with tf.Graph().as_default(), tf.Session(config=config) as sess:
		SOURCE_VIS = tf.placeholder(tf.float32, shape = (BATCH_SIZE, patch_size, patch_size, 1), name = 'SOURCE_VIS')
		SOURCE_IR = tf.placeholder(tf.float32, shape = (BATCH_SIZE, patch_size, patch_size, 1), name = 'SOURCE_IR')
		w_print('source_vis shape:', SOURCE_VIS.shape)	# print the actual tensor shape

		FUSED_IMG = Generator('Generator').transform(vis = SOURCE_VIS, ir = SOURCE_IR)
		w_print('generate:', FUSED_IMG.shape)

		generated_img = nhwc_to_nchw(FUSED_IMG)
		source_ir, source_vis = nhwc_to_nchw(SOURCE_IR), nhwc_to_nchw(SOURCE_VIS)
		# Discriminator 1: visible image vs. generated (fused) image.
		fus_and_vis = tf.concat([generated_img, source_vis], 0)

		# Temporary settings: centralize values that previously had to be
		# edited in two places, to avoid missing one.
		IMAGE_CHANNEL = 1
		REPEAT_CNN = 4
		HIDDEN_NUM = 128
		# repeat_num below was temporarily changed 4->3; may be reverted to 4
		# for better results, provided h and w are divisible by 8.
		# IMAGE_CHANNEL (=1) is the input channel count, i.e. the image
		# channel count after D's encode/decode.
		d_out, D_z,D_var = DiscriminatorCNN("Discriminator1",fus_and_vis, IMAGE_CHANNEL, patch_size, REPEAT_CNN,HIDDEN_NUM,'NCHW')
		AE_G, AE_vi = tf.split(d_out, 2)  # split D's output in half: separate the results for generated image G and real image x
		I_D1_fus,I_D1_vis = denorm_img(AE_G, 'NCHW'), denorm_img(AE_vi, 'NCHW')  # generated_img and X after passing through the D network

		# Discriminator 2: infrared image vs. generated (fused) image.
		fus_and_ir = tf.concat([generated_img, source_ir], 0)
		d2_out, D2_z,D2_var = DiscriminatorCNN("Discriminator2",fus_and_ir, IMAGE_CHANNEL, patch_size, REPEAT_CNN,HIDDEN_NUM,'NCHW')
		AE_G2, AE_ir = tf.split(d2_out, 2)  # split D's output in half: separate the results for generated image G and real image x
		I_D2_fus,I_D2_ir = denorm_img(AE_G2, 'NCHW'),denorm_img(AE_ir, 'NCHW')  # generated_img and X after passing through the D network


		I_fusion = denorm_img(generated_img, 'NCHW')	# image prepared for visualization; see also I_D2_fus etc.
		# (helper defined elsewhere) restores normal image layout (16, 64, 64, 3)
		# and 0~255 value range; this is G's output.



		# new_g_lr = tf.maximum(g_lr * 0.5, 2e-05)
		# g_lr_update = tf.assign(g_lr,new_g_lr,name='g_lr_update')
		g_lr, d1_lr, d2_lr = tf.Variable(MAX_LR, name='g_lr'),tf.Variable(MAX_LR, name='d1_lr'),tf.Variable(MAX_LR, name='d2_lr')
		lr_deadline = tf.Variable(MIN_LR, name='lr_deadline')
		g_lr_update = tf.assign(g_lr, tf.maximum(g_lr * DECAY_RATE, lr_deadline), name='g_lr_update')  # assign, and define a new graph node
		d1_lr_update = tf.assign(d1_lr, tf.maximum(d1_lr * DECAY_RATE, lr_deadline), name='d1_lr_update')
		d2_lr_update = tf.assign(d2_lr, tf.maximum(d2_lr * DECAY_RATE, lr_deadline), name='d2_lr_update')

		# Discriminator reconstruction errors (BEGAN-style autoencoder losses).
		d1_loss_real = tf.reduce_mean(tf.abs(AE_vi - source_vis))
		d2_loss_real = tf.reduce_mean(tf.abs(AE_ir - source_ir))
		d1_loss_fake = tf.reduce_mean(tf.abs(AE_G - generated_img))
		d2_loss_fake = tf.reduce_mean(tf.abs(AE_G2 - generated_img))


		# Generator loss terms.
		g1_loss = tf.reduce_mean(tf.abs(AE_G - generated_img))
		g2_loss = tf.reduce_mean(tf.abs(AE_G2 - generated_img))
		# g_loss_gan = g1_loss + 0.6 * g2_loss	 # adversarial loss
		g_loss_gan = g1_loss + GAM * g2_loss     # adversarial loss

		ALPHA, BETA =  a, b
		# 5.0, 2.0
		grad = tf.reduce_mean(tf.square(gradient(FUSED_IMG) - gradient(SOURCE_IR))) + \
			   BETA * tf.reduce_mean(tf.square(gradient(FUSED_IMG) - gradient(SOURCE_VIS)))

		ssim = (ssim_fn(FUSED_IMG, SOURCE_IR) + ssim_fn(FUSED_IMG, SOURCE_VIS)) / 2.

		g_loss_2 = (1. - ssim) + ALPHA * grad	# detail (texture) loss

		LAMBDA = LAMD			# hyper-parameter: weight ratio between adversarial loss and texture loss
		G_loss = g_loss_gan + LAMBDA * g_loss_2

		# Temporarily fix two parameters.
		gamma1, gamma2 = 0.5, 0.5  # hyper-parameters: balance g-loss vs. d-loss inside the overall convergence measure
		k_t_vis = tf.Variable(k_vis, trainable=False, name='k_t_vis')
		k_t_ir = tf.Variable(k_ir, trainable=False, name='k_t_ir')


		# BEGAN discriminator objectives: real-term minus k_t-weighted fake-term.
		D1_loss = d1_loss_real - k_t_vis * d1_loss_fake
		D2_loss = d2_loss_real - k_t_ir * d2_loss_fake


		step = tf.Variable(0, name='step', trainable=False)
		# Set up the three optimizers.
		theta_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
		g_optimizer = tf.train.AdamOptimizer(g_lr)
		d_optimizer = tf.train.AdamOptimizer(d1_lr)
		d2_optimizer =tf.train.AdamOptimizer(d2_lr)

		D1_optim = d_optimizer.minimize(D1_loss, var_list=D_var,name="D1_optim")
		D2_optim = d2_optimizer.minimize(D2_loss, var_list=D2_var,name="D2_optim")
		G_optim = g_optimizer.minimize(G_loss, global_step=step, var_list=theta_G,name="G_optim")

		# For the convergence measure: it is unclear how d2_loss should enter;
		# the measure formula still needs further study.
		# balance = gamma * d1_loss_real  - g1_loss
		# measure = d1_loss_real +  tf.abs(balance)

		balance = gamma1 * d1_loss_real - g1_loss + gamma2 * d2_loss_real - g2_loss
		measure =d1_loss_real + d2_loss_real + tf.abs(balance)	# an indicator of overall training progress


		# Clamp tensor values to a range — why? (original author's open question)
		sess.run(tf.global_variables_initializer())
		saver = tf.train.Saver(max_to_keep = 500)

		tf.summary.scalar('G_Loss_D1', g1_loss)
		tf.summary.scalar('G_Loss_D2', g2_loss)
		tf.summary.scalar('G_Loss_gan', g_loss_gan)
		tf.summary.scalar("G_loss_total",G_loss)
		tf.summary.scalar('D1_real', tf.reduce_mean(d1_loss_real))
		tf.summary.scalar('D1_fake', tf.reduce_mean(d1_loss_fake))
		tf.summary.scalar('D2_real', tf.reduce_mean(d2_loss_real))
		tf.summary.scalar('D2_fake', tf.reduce_mean(d2_loss_fake))
		
		tf.summary.image('fused_img', I_fusion)
		tf.summary.image('d1_fused_img', I_D1_fus)
		tf.summary.image('d1_vis_img', I_D1_vis)
		tf.summary.image('d2_fused_img', I_D2_fus)
		tf.summary.image('d2_ir_img', I_D2_ir)

		tf.summary.scalar('G_lr', g_lr)
		tf.summary.scalar('D1_lr',d1_lr)
		tf.summary.scalar('D2_lr', d2_lr)


		merged = tf.summary.merge_all()
		writer = tf.summary.FileWriter("logs/", sess.graph)

		# ** Start Training **
		# First, define some temporary variables for compatibility with the BEGAN code.
		lr_update_step = 100000
		# log_step = 50 # save summaries every 50 steps

		# NOTE(review): this rebinds the Python name `step` (previously the
		# tf.Variable 'step'); G_optim still uses the graph variable as its
		# global_step, so only this Python-side counter is affected.
		step = 0
		# count_loss = 0
		# num_imgs = source_imgs.shape[0]
		# NOTE(review): measure_history is appended to below but never read.
		measure_history = deque([0] * lr_update_step, lr_update_step)

		for epoch in trange(EPOCHS):
			np.random.shuffle(source_imgs)	# first, shuffle source_imgs
			for batch in range(n_batches):	# iterate over the batches
				step += 1
				VIS_batch = source_imgs[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE), :, :, 0]	# fetch the batch data
				IR_batch = source_imgs[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE), :, :, 1]
				VIS_batch = np.expand_dims(VIS_batch, -1)	# add a trailing axis to make the array 4-D
				IR_batch = np.expand_dims(IR_batch, -1)

				FEED_DICT = {SOURCE_VIS: VIS_batch, SOURCE_IR: IR_batch,}

				fetch_dict = {"d1_optim":D1_optim,"d2_optim":D2_optim,"g_optim":G_optim,\
							  "d1_loss": D1_loss,"d2_loss": D2_loss,\
							  "g_loss": G_loss,"g_loss_gan":g_loss_gan,"g_loss_2":g_loss_2,\
							  "measure": measure}
				
				if step % logging_period == 0:  # logging_period: e.g. 40 (default 1)
					fetch_dict.update({  			# every logging_period steps also fetch the summary
						"summary": merged,
					})

				result = sess.run(fetch_dict,FEED_DICT)  # run one training step and fetch the requested values
				measure_value = result['measure']
				measure_history.append(measure_value)  # append the overall measure to the history deque

				g_loss_value = result['g_loss']
				d1_loss_value = result['d1_loss']
				d2_loss_value = result['d2_loss']

				g_loss_2_value = result["g_loss_2"]
				g_loss_gan_value = result["g_loss_gan"]
				
				# Uses BEGAN's training scheme here; DDcGAN's balanced-training
				# scheme was removed. DDcGAN is still worth borrowing from,
				# since dual discriminators are its hallmark —
				# the two discriminators still need to be balanced.
				# NOTE(review): MAX_D_ITER is a shared budget — the second loop
				# only runs with whatever iterations the first one left over.
				MAX_D_ITER = 10
				BIG_THAN = (1 + 0.2)
				while d1_loss_value > d2_loss_value * BIG_THAN  and MAX_D_ITER:
					sess.run([D1_optim, ], feed_dict=FEED_DICT)
					d1_loss_value = sess.run(D1_loss, feed_dict=FEED_DICT)
					MAX_D_ITER -=1
				while d2_loss_value > d1_loss_value * BIG_THAN and MAX_D_ITER:
					sess.run([D2_optim, ], feed_dict=FEED_DICT)
					d2_loss_value = sess.run(D2_loss, feed_dict=FEED_DICT)
					MAX_D_ITER -= 1

				# This part is newly added: extra G steps while the adversarial
				# loss stays above 0.5, capped at MAX_G_ITER iterations.
				MAX_G_ITER = 5
				while (g_loss_gan_value > 0.5) and MAX_G_ITER:
					sess.run([G_optim, ], feed_dict = FEED_DICT)
					g_loss_gan_value = sess.run(g_loss_gan, feed_dict = FEED_DICT)
					MAX_G_ITER -= 1

				# Record the summary and save the model (every logging_period steps).
				if step % logging_period == 0:
					writer.add_summary(result['summary'], step)  # write the fetched summary at this training step
					writer.flush()  # flush the event-file buffer

					sess.run([g_lr_update, d1_lr_update, d2_lr_update])	# decay the learning rates

					model_path = save_path + str(step) + '/' + str(step) + '.ckpt'
					saver.save(sess, model_path)
					w_print(f"--- Model saved in {model_path} -----")

				# On the last step, or every 10 batches, log training status.
				# NOTE(review): the f-string prints the GLOBAL step against the
				# per-epoch batch count n_batches — looks inconsistent; confirm intent.
				is_last_step = (epoch == EPOCHS - 1) and (batch == n_batches - 1)
				if is_last_step or batch % 10 == 0:  	# print losses every 10 batches
					elapsed_time = datetime.now() - start_time
					w_print(f"Epoch{epoch}[{step}/{n_batches}] lr:{(g_lr.eval(),d1_lr.eval(),d2_lr.eval())},"
							f"loss:{(g_loss_value,g_loss_gan_value,g_loss_2_value), d1_loss_value, d2_loss_value} "
							f"measure:{measure_value} elapsed_time:{elapsed_time}")

				# A section was cut here that validated the current model's output:
				# specifically, result1 = G(noise), then result2 = D(result1, original image).

		writer.close()
		w_print.finished()	# custom addition: records the final activity

		# fuse test imgs
		# from main_gen import gen_img_main
		# gen_img_main(MODEL_NAME)



























