# coding: utf-8
from __future__ import print_function
import os
import time
import random
from PIL import Image
import tensorflow as tf
import numpy as np
import cv2
import math
from utils import *
from model import *
from glob import glob
from skimage.measure import compare_ssim
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig

def cal_psnr(im1, im2):
    """Return the PSNR (in dB) between two images with pixel values in [0, 255].

    ``im1`` is reshaped to ``im2``'s shape first: the caller compares a
    batched network output of shape (1, h, w, 3) against an unbatched
    (h, w, 3) reference, so only the total element count must match.

    Returns 100 for (near-)identical images and 0 when the images do not
    contain the same number of elements.
    """
    im1 = np.asarray(im1)
    im2 = np.asarray(im2)
    # Check element counts BEFORE reshaping: in the original code the shape
    # comparison ran AFTER reshape, where it could never fire (reshape either
    # succeeds, making the shapes equal, or raises ValueError).
    if im1.size != im2.size:
        print("two images have different shape!")
        return 0
    im1 = im1.reshape(im2.shape)
    # Inputs are (0, 255) grayscale or color images; for a color image
    # np.mean also averages across the three channels.
    mse = np.mean((im1 / 255. - im2 / 255.) ** 2)
    if mse < 1.0e-10:  # The two images are essentially identical.
        return 100
    PIXEL_MAX = 1.0
    # Equivalent to 10 * log10(PIXEL_MAX ** 2 / mse).
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))



def cal_ssim(image1, image2):
    """Mean SSIM of two color images, averaged over the B, G and R channels.

    ``image2`` is reshaped to ``image1``'s shape so that a batched
    (1, h, w, 3) network output can be compared against an unbatched
    (h, w, 3) reference.
    """
    image1 = np.array(image1, dtype=np.float32)
    image2 = np.array(image2, dtype=np.float32)
    image2 = image2.reshape(image1.shape)
    # cv2.split yields the channels in BGR order (not RGB).
    channels1 = cv2.split(image1)
    channels2 = cv2.split(image2)
    # Per-channel SSIM score; compare_ssim(..., full=True) returns
    # (score, diff_image) and only the scalar score is kept.
    scores = [compare_ssim(c1, c2, full=True)[0]
              for c1, c2 in zip(channels1, channels2)]
    # Plain average over the three channel scores.
    return sum(scores) / 3

# Create a session configured for execution on a Huawei Ascend NPU.
# The NpuOptimizer rewrite pass offloads the graph to the NPU device.
config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["use_off_line"].b = True  # Must be explicitly enabled for training on Ascend AI Processor.
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # Remapping must be disabled explicitly.
config.graph_options.rewrite_options.optimizers.extend(["GradFusionOptimizer"]) # Required in the distributed training scenario.

sess = tf.Session(config=config)

# Placeholders for the three KinD sub-networks; spatial dims are dynamic.
input_decom = tf.placeholder(tf.float32, [None, None, None, 3], name='input_decom')        # image fed to DecomNet
input_low_r = tf.placeholder(tf.float32, [None, None, None, 3], name='input_low_r')        # low-light reflectance
input_low_i = tf.placeholder(tf.float32, [None, None, None, 1], name='input_low_i')        # low-light illumination
input_high_r = tf.placeholder(tf.float32, [None, None, None, 3], name='input_high_r')      # declared but unused below
input_high_i = tf.placeholder(tf.float32, [None, None, None, 1], name='input_high_i')      # declared but unused below
input_low_i_ratio = tf.placeholder(tf.float32, [None, None, None, 1], name='input_low_i_ratio')  # brightening-ratio map

# Decomposition net splits an image into reflectance (R) and illumination (I).
[R_decom, I_decom] = DecomNet_simple(input_decom)
decom_output_R = R_decom
decom_output_I = I_decom
# Restoration net denoises the reflectance; adjust net rescales illumination.
output_r = Restoration_net(input_low_r, input_low_i)
output_i = Illumination_adjust_net(input_low_i, input_low_i_ratio)

# Partition trainable variables by sub-network so each saver restores only
# its own checkpoint.
var_Decom = [var for var in tf.trainable_variables() if 'DecomNet' in var.name]
var_adjust = [var for var in tf.trainable_variables() if 'Illumination_adjust_net' in var.name]
var_restoration = [var for var in tf.trainable_variables() if 'Restoration_net' in var.name]

saver_Decom = tf.train.Saver(var_list=var_Decom)
saver_adjust = tf.train.Saver(var_list=var_adjust)
saver_restoration = tf.train.Saver(var_list=var_restoration)

# Restore the three pre-trained sub-networks from their checkpoints.
# Each entry: (checkpoint directory, matching saver, message when missing).
_restore_specs = [
    ('./checkpoint/decom_net_train/', saver_Decom, 'No decomnet checkpoint!'),
    ('./checkpoint/illumination_adjust_net_train/', saver_adjust, "No adjust pre model!"),
    ('./checkpoint/Restoration_net_train/', saver_restoration, "No restoration pre model!"),
]
for _ckpt_dir, _saver, _missing_msg in _restore_specs:
    _state = tf.train.get_checkpoint_state(_ckpt_dir)
    if _state:
        print('loaded ' + _state.model_checkpoint_path)
        _saver.restore(sess, _state.model_checkpoint_path)
    else:
        print(_missing_msg)

###load our data
# Low-light inputs: keep the decoded images plus their base names (the
# names are reused when writing the enhanced results to disk).
our_low_data = []
our_img_name = []
our_low_data_name = glob('./LOLdataset/our485/low/*.png')
our_low_data_name.sort()
for low_path in our_low_data_name:
    [_, name] = os.path.split(low_path)
    # Keep everything before the FIRST dot (original behavior); the unused
    # "suffix" extraction was dropped.
    our_img_name.append(name[:name.find('.')])
    our_low_data.append(load_images(low_path))
    #print(our_low_data[-1].shape)
# To get better results, the illumination adjustment ratio is computed based
# on the decom_i_high, so we also need the high (normal-light) data.
# Both globs are sorted, so low/high images pair up by filename order.
our_high_data = [load_images(high_path)
                 for high_path in sorted(glob('./LOLdataset/our485/high/*.png'))]

sample_dir = './results/LOLdataset_our485/'
if not os.path.isdir(sample_dir):
    os.makedirs(sample_dir)

print("Start evaluating!")
start_time = time.time()
# Running totals over the whole dataset.
# NOTE(review): "pnsr" is a typo for "psnr"; kept as-is since it is only a
# local name and appears in the printed labels.
ssim = 0.0
pnsr = 0.0
for idx in range(len(our_low_data)):
    print(idx)
    name = our_img_name[idx]
    input_low = our_low_data[idx]
    input_low_our = np.expand_dims(input_low, axis=0)    # add batch dim -> (1, h, w, 3)
    input_high = our_high_data[idx]
    input_high_our = np.expand_dims(input_high, axis=0)
    h, w, _ = input_low.shape

    # Decompose both the low-light input and the normal-light reference
    # into reflectance (R) and illumination (I) components.
    decom_r_low, decom_i_low = sess.run([decom_output_R, decom_output_I], feed_dict={input_decom: input_low_our})
    decom_r_high, decom_i_high = sess.run([decom_output_R, decom_output_I], feed_dict={input_decom: input_high_our})

    # Restore (denoise) the low-light reflectance.
    restoration_r = sess.run(output_r, feed_dict={input_low_r: decom_r_low, input_low_i: decom_i_low})

    # Scalar brightening ratio derived from the reference illumination;
    # the 0.0001 epsilon guards against division by zero.
    ratio = np.mean(((decom_i_high)) / (decom_i_low + 0.0001))

    # Broadcast the scalar ratio to a (1, h, w, 1) map for the adjust net.
    i_low_data_ratio = np.ones([h, w]) * (ratio)
    i_low_ratio_expand = np.expand_dims(i_low_data_ratio, axis=2)
    i_low_ratio_expand2 = np.expand_dims(i_low_ratio_expand, axis=0)

    adjust_i = sess.run(output_i, feed_dict={input_low_i: decom_i_low, input_low_i_ratio: i_low_ratio_expand2})
    # Final result: restored reflectance recombined with adjusted illumination.
    fusion = restoration_r * adjust_i
    save_images(os.path.join(sample_dir, '%s_kindle.png' % (name)), fusion)
    ssim = ssim + cal_ssim(input_high, fusion) #(400,600,3) (1,400,600,3)
    pnsr = pnsr + cal_psnr(input_high, fusion)
    # NOTE(review): these are the cumulative sums so far, not per-image values.
    print("pnsr: %.4f ssim: %.4f" % (pnsr, ssim))
# Dataset-level averages.
ssim = ssim / len(our_low_data)
pnsr = pnsr / len(our_low_data)
print("[*] pnsr: %.4f ssim: %.4f" % (pnsr, ssim))
