# %%
import tensorflow as tf
import cv2
from PIL import Image
from tensorflow import keras
import numpy as np
import os

# %%
# Restrict TensorFlow to the first GPU and configure its memory behaviour.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # make only GPU 0 visible to TensorFlow
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9  # cap this process at 90% of GPU memory (original comment said 50%, value is 0.9)
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of grabbing it all up front
sess = tf.compat.v1.Session(config=config)

# %%
from tensorflow.keras.mixed_precision import experimental as mixed_precision


# %%
# Sample one frame from the video every `time` frames (NOTE: the original
# comment said "seconds", but CAP_PROP_POS_FRAMES is a frame index, so the
# stride is in frames), skipping the first and last 20 sample positions.
def get_movie_frame(path, time):
    """Return a list of BGR frames sampled from the video at ``path``.

    One frame is grabbed every ``time`` frames; the first and last 20
    sample positions are skipped (presumably to avoid intro/outro content).

    Args:
        path: path of the video file readable by OpenCV.
        time: frame stride between consecutive samples.

    Returns:
        List of frames as numpy arrays (whatever ``cap.read`` yields).
    """
    cap = cv2.VideoCapture(path)
    frame_data = []
    try:
        total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        for i in range(20, int(total_frame // time - 20)):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i * time)
            success, frame = cap.read()
            if success:
                frame_data.append(frame)
    finally:
        cap.release()  # fix: the original never released the capture handle
    return frame_data


# %%
# Cut (2*x_size) x (2*y_size) patches along the diagonal of each frame; each
# patch is the HR target and its 2x-downscaled copy is the LR input.
def get_img_data(frame_data, x_size, y_size):
    """Build (LR, HR) training pairs from a list of frames.

    HR patches of shape (2*x_size, 2*y_size) are taken along the diagonal of
    every frame; the matching LR input is the same patch resized down to
    (x_size, y_size).  NOTE: ``frame_data`` is consumed (popped empty), which
    matches the original behaviour and keeps peak memory low.

    Args:
        frame_data: list of HxWx3 frames; all frames assumed the same size
            as the first one — TODO confirm with callers.
        x_size: LR patch height (HR patch rows are 2*x_size).
        y_size: LR patch width (HR patch columns are 2*y_size).

    Returns:
        (data_x, data_y): lists of LR inputs and HR targets.
    """
    data_y = []
    data_x = []
    # Bug fix: rows are sliced in steps of 2*x_size and columns in steps of
    # 2*y_size, so the patch counts must use the matching dimensions.  The
    # original had them swapped, which only worked when x_size == y_size.
    h_num = int(len(frame_data[0]) // (x_size * 2))
    w_num = int(len(frame_data[0][0]) // (y_size * 2))
    while len(frame_data):
        img = frame_data.pop()
        for num in range(min(h_num, w_num)):
            sub_img = img[num * (x_size * 2):(num + 1) * (x_size * 2), num * (y_size * 2):(num + 1) * (y_size * 2), :]
            data_y.append(sub_img)
            # cv2.resize expects dsize as (width, height), hence (y_size, x_size).
            data_x.append(cv2.resize(sub_img, [y_size, x_size]))
    return data_x, data_y


# %%
def get_effect_area(frame_list):
    """Crop every frame to rows 138:942 (drops the letterbox bars),
    returning a new list and leaving ``frame_list`` untouched."""
    return [frame[138:942, :, :] for frame in frame_list]


# %%
def read_img():
    # Load up to 300 high-resolution training images and build (LR, HR)
    # patch pairs: 256x256 HR patches taken along each image's diagonal,
    # with 128x128 LR copies produced by cv2.resize.
    # Returns (data_x, data_y): lists of LR and HR patches.
    data_x = []
    data_y = []
    path = '../train/train'
    photo_num = 0
    y_size = 256
    x_size = 128

    for fileName in os.listdir(path):
        if photo_num >= 300:
            break  # fix: the original kept scanning the directory past the cap
        print('processing[%d]:[%s]' % (photo_num, fileName))
        img = cv2.imread(path + '/' + fileName)
        if img is None:
            # fix: cv2.imread returns None for unreadable/non-image files;
            # the original crashed on img.shape here.
            continue
        # fix: diagonal patches need room in BOTH dimensions; using shape[0]
        # alone over-counted on tall images, yielding ragged (clipped) patches.
        patch = min(img.shape[0], img.shape[1]) // y_size
        for num in range(patch):
            data_y.append(img[num * y_size:(num + 1) * y_size, num * y_size:(num + 1) * y_size])
            data_x.append(
                cv2.resize(img[num * y_size:(num + 1) * y_size, num * y_size:(num + 1) * y_size], [x_size, x_size]))
        photo_num = photo_num + 1
    return data_x, data_y


# %%
def soft_dice_loss(y_true, y_pred, epsilon=1e-7):
    """Per-pixel MSE plus a soft-Dice dissimilarity term.

    The Dice term is computed over the spatial axes (everything except the
    batch and channel axes); ``epsilon`` guards against division by zero.
    """
    y_true = tf.cast(y_true, 'float32')
    y_pred = tf.cast(y_pred, 'float32')
    spatial_axes = tuple(range(1, len(y_pred.shape) - 1))
    mse_term = tf.keras.losses.MSE(y_true=y_true, y_pred=y_pred)
    overlap = 2. * tf.reduce_sum(y_pred * y_true, spatial_axes)
    total = tf.reduce_sum(tf.square(y_pred) + tf.square(y_true), spatial_axes)
    dice_score = tf.reduce_mean(overlap / (total + epsilon))
    return mse_term + (1 - dice_score)

# %%
def mse_and_l1(y_pred, y_true):
    """MSE combined with a smooth-L1 (Huber-like) penalty on the error."""
    y_true = tf.cast(y_true, 'float32')
    y_pred = tf.cast(y_pred, 'float32')
    mse_term = tf.keras.losses.MSE(y_true=y_true, y_pred=y_pred)
    abs_err = tf.abs(y_true - y_pred)
    # Quadratic where |error| < 1, linear (minus 0.5) elsewhere.
    small = tf.cast(tf.less(abs_err, 1.0), 'float32')
    smooth_l1 = small * 0.5 * abs_err ** 2 + (1.0 - small) * (abs_err - 0.5)
    return tf.reduce_mean(smooth_l1) + mse_term


# %%
def psnr(y_pred, y_true):
    """Peak signal-to-noise ratio metric for images scaled to [0, 1]."""
    score = tf.image.psnr(y_pred, y_true, max_val=1.0)
    return score


# %%
def filter_block(input, filters=16, kernel_size_list=(1, 3, 5)):
    """Multi-scale (inception-style) convolution block.

    Applies one ``filters``-channel Conv2D per kernel size in
    ``kernel_size_list`` to ``input``, concatenates the branch outputs along
    channels, then fuses them with a final 3x3 convolution.

    Args:
        input: 4-D feature tensor.  (The name shadows the ``input`` builtin
            but is kept for caller compatibility.)
        filters: number of filters per branch and for the fused output.
        kernel_size_list: kernel sizes of the parallel branches.  Fix: the
            default is now an immutable tuple instead of a shared mutable
            list (same values, same iteration order).

    Returns:
        The fused feature tensor with ``filters`` channels.
    """
    branches = [
        tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size,
                               padding='same', activation=tf.nn.relu)(input)
        for kernel_size in kernel_size_list
    ]
    con_cat = tf.keras.layers.concatenate(branches)
    output = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, padding='same', activation=tf.nn.relu)(con_cat)
    return output


# %%
def ssim_loss(y_pred, y_true):
    """SSIM dissimilarity scaled by the combined MSE + smooth-L1 loss."""
    ssim_score = tf.reduce_mean(tf.image.ssim(y_true, y_pred, max_val=1.0))
    return (1 - ssim_score) * mse_and_l1(y_true, y_pred)


# %%
def ssim(y_pred, y_true):
    """Mean SSIM metric (7x7 filter window) for images in [0, 1]."""
    scores = tf.image.ssim(y_true, y_pred, max_val=1.0, filter_size=7)
    return tf.reduce_mean(scores)


# %%
def ssim_mul(y_pred, y_true):
    """Mean multi-scale SSIM metric (7x7 filter window), images in [0, 1]."""
    scores = tf.image.ssim_multiscale(y_true, y_pred, max_val=1.0, filter_size=7)
    return tf.reduce_mean(scores)


# %%
def mix_ssim_loss(y_pred, y_true):
    """Sum of single-scale and multi-scale SSIM dissimilarities.

    Each SSIM term lies in [-1, 1], so the loss is (1 - ssim) + (1 - ms_ssim).
    """
    y_true = tf.cast(y_true, 'float32')
    y_pred = tf.cast(y_pred, 'float32')
    single_scale = tf.reduce_mean(tf.image.ssim(y_true, y_pred, max_val=1.0))
    multi_scale = tf.reduce_mean(tf.image.ssim_multiscale(y_true, y_pred, max_val=1.0, filter_size=7))
    return 2 - single_scale - multi_scale


# %%
# (A hand-rolled LearningRateScheduler prototype previously lived here,
# commented out; ReduceLROnPlateau below supersedes it.)
# Multiply the learning rate by 0.9 whenever the training loss has not
# improved for 10 epochs.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='loss',
    factor=0.9,
    patience=10,
    mode='auto',
    cooldown=0,
    verbose=2,
)


# %%
def super_pixel(input_height=None, input_width=None):
    """Build the 2x super-resolution model (U-Net-style with skip connections).

    The network upsamples the input 2x immediately, then runs an
    encoder/decoder at target resolution with concatenation skips and
    multi-scale ``filter_block`` stages, ending in a 3-channel ReLU output.

    Args:
        input_height: height of the LR input.  Defaults to the module-level
            ``x_size`` global (the original implicit dependency), so existing
            ``super_pixel()`` calls behave exactly as before.
        input_width: width of the LR input.  Defaults to the module-level
            ``y_size`` global.

    Returns:
        An uncompiled ``tf.keras.Model`` mapping (h, w, 3) inputs to
        (2h, 2w, 3) outputs.
    """
    # Fix: the original read x_size/y_size straight from globals defined only
    # under __main__; resolve them lazily here so the old behaviour is kept
    # while explicit sizes become possible.
    if input_height is None:
        input_height = x_size
    if input_width is None:
        input_width = y_size
    input_xs = tf.keras.Input(shape=[input_height, input_width, 3])
    # Stem, then immediate 2x upsample: all later stages run at HR resolution.
    conv_99 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)(input_xs)
    conv_0 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_99)
    conv_0 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_0)
    uppool_0 = tf.keras.layers.UpSampling2D()(conv_0)
    convT_0 = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=2, padding='same', activation=tf.nn.relu)(
        uppool_0)
    # Encoder: three conv stages with BatchNorm + 2x max-pooling between them.
    conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation=tf.nn.relu)(convT_0)
    conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_1)
    conv_1 = tf.keras.layers.BatchNormalization()(conv_1)
    pool_1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same')(conv_1)
    conv_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation=tf.nn.relu)(pool_1)
    conv_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_2)
    conv_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_2)
    conv_2 = tf.keras.layers.BatchNormalization()(conv_2)
    pool_2 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same')(conv_2)
    conv_3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation=tf.nn.relu)(pool_2)
    conv_3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_3)
    conv_3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_3)
    conv_3 = tf.keras.layers.BatchNormalization()(conv_3)
    pool_3 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same')(conv_3)
    # Bottleneck.
    conv_4 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, padding='same', activation=tf.nn.relu)(pool_3)
    conv_4 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_4)
    conv_4 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, padding='same', activation=tf.nn.relu)(conv_4)
    conv_4 = tf.keras.layers.BatchNormalization()(conv_4)
    # Decoder: upsample, concatenate the matching encoder stage, then two
    # multi-scale filter_block stages.
    uppool_6 = tf.keras.layers.UpSampling2D()(conv_4)
    convT_6 = tf.keras.layers.Conv2DTranspose(filters=256, kernel_size=2, padding='same', activation=tf.nn.relu)(
        uppool_6)
    con_6 = tf.keras.layers.concatenate([convT_6, conv_3])
    filter_block_5 = filter_block(con_6, filters=256, kernel_size_list=[2, 3, 4])
    filter_block_5 = filter_block(filter_block_5, filters=256, kernel_size_list=[2, 3, 4])
    uppool_7 = tf.keras.layers.UpSampling2D()(filter_block_5)
    convT_7 = tf.keras.layers.Conv2DTranspose(filters=128, kernel_size=2, padding='same', activation=tf.nn.relu)(
        uppool_7)
    con_7 = tf.keras.layers.concatenate([convT_7, conv_2])
    filter_block_6 = filter_block(con_7, filters=128, kernel_size_list=[2, 3, 4])
    filter_block_6 = filter_block(filter_block_6, filters=128, kernel_size_list=[2, 3, 4])
    uppool_8 = tf.keras.layers.UpSampling2D()(filter_block_6)
    convT_8 = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=2, padding='same', activation=tf.nn.relu)(
        uppool_8)
    con_8 = tf.keras.layers.concatenate([convT_8, conv_1])
    filter_block_7 = filter_block(con_8, filters=64, kernel_size_list=[1, 2, 3])
    filter_block_8 = filter_block(filter_block_7, filters=32, kernel_size_list=[1, 2, 3])
    # Head: project down to a 3-channel image.
    conv_10 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)(filter_block_8)
    conv_10 = tf.keras.layers.Conv2D(filters=3, kernel_size=1, padding='same', activation=tf.nn.relu)(conv_10)
    model = tf.keras.Model(inputs=input_xs, outputs=conv_10)

    model.summary()
    return model


# %%


if __name__ == '__main__':
    # Mixed-precision training: use "mixed_float16" on GPU, "mixed_bfloat16" on TPU.
    policy = mixed_precision.Policy('mixed_float16')
    mixed_precision.set_policy(policy)

    # %%
    # NOTE(review): the original comment claimed this *enables* eager mode,
    # but the call disables it (TF1-style graph execution).
    tf.compat.v1.disable_eager_execution()

    # %%
    # Image.fromarray(frame_data.pop()[1 * 512:(1 + 1) * 512,1* 512:(1 + 1) * 512])
    # Image.fromarray(cv2.resize(frame_data.pop()[1 * 512:(1 + 1) * 512,1* 512:(1 + 1) * 512],[256,256]))

    # %%
    # x_size = 128
    # y_size = 120

    # LR patch size; HR targets are 2x each dimension (see get_img_data).
    # super_pixel() reads these module-level globals.
    x_size = 64
    y_size = 64
    video_frame_num = 24
    movie_path = "E:/迅雷下载/哪吒之魔童降.mkv"
    video_output_path = r'C:/Users/13167/Desktop/frame/test.mp4'

    # %%
    # Sample one frame every 200 frames, crop the letterbox, cut patch pairs.
    data_x, data_y = get_img_data(get_effect_area(get_movie_frame(movie_path, 200)), x_size, y_size)

    # %%
    model = super_pixel()
    # %%
    # model = keras.models.load_model('./model/train_model.h5',compile=False)

    # %%
    # Wrap Adam in a dynamic loss-scale optimizer, needed for float16 training.
    opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(tf.keras.optimizers.Adam(lr=0.0001),
                                                                   loss_scale='dynamic')
    model.compile(optimizer=opt, loss=mix_ssim_loss, metrics=[psnr, ssim, ssim_mul])

    # %%
    # Pixels scaled to [0, 1]; small batch because HR patches are large.
    model.fit(np.array(data_x) / 255., np.array(data_y) / 255., epochs=1000, batch_size=2, verbose=2,
              callbacks=[reduce_lr])
