# -*- coding: utf-8 -*-
import os
import PIL
from PIL import ImageOps
from PIL import Image as PILImage
import PIL.ImageOps
import numpy as np
import random
from IPython.display import display, Image
from tensorflow.keras.utils import load_img
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import MaxPooling2D, Cropping2D, Concatenate
from tensorflow.keras.layers import Lambda, Activation, BatchNormalization, Dropout
from tensorflow.keras.models import Model

# Directory containing the input photographs.
input_dir = "segdata/images/"
# Directory containing the segmentation annotations (trimaps).
target_dir = "segdata/annotations/trimaps"
# Spatial size every image/mask is resized to.
img_size = (160, 160)
batch_size = 32
num_classes = 4

# Sorted list of all .jpg photograph paths.
input_img_paths = sorted(
    os.path.join(input_dir, fname)
    for fname in os.listdir(input_dir)
    if fname.endswith('.jpg')
)
# Sorted list of all .png mask paths, skipping hidden files (e.g. ".DS_Store");
# sorting both lists keeps image k paired with mask k.
target_img_paths = sorted(
    os.path.join(target_dir, fname)
    for fname in os.listdir(target_dir)
    if fname.endswith('.png') and not fname.startswith('.')
)


# Preview the first input photograph.
first_image_path = input_img_paths[0]
display(Image(filename=first_image_path))

# Preview the matching annotation; autocontrast stretches the small
# label values so the grayscale mask is actually visible.
first_mask = load_img(target_img_paths[0], color_mode='grayscale')
img = PIL.ImageOps.autocontrast(first_mask)
display(img)


# Dataset loader
class OxfordPets(keras.utils.Sequence):
    """Keras Sequence yielding aligned (image, mask) batches for segmentation.

    Each batch is a pair ``(x, y)`` where ``x`` has shape
    ``(batch_size,) + img_size + (3,)`` (float32 RGB) and ``y`` has shape
    ``(batch_size,) + img_size + (1,)`` (uint8 integer labels).
    """

    def __init__(self, input_img_paths, target_img_paths, batch_size, img_size):
        # Parallel lists: input_img_paths[k] pairs with target_img_paths[k].
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths
        self.batch_size = batch_size
        self.img_size = img_size

    def __len__(self):
        # Number of complete batches; a trailing partial batch is dropped.
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Return batch ``idx`` as an (images, masks) pair."""
        i = idx * self.batch_size
        # Paths for this batch's images and annotations.
        batch_input_img_paths = self.input_img_paths[i:i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i:i + self.batch_size]

        # BUG FIX: use self.batch_size, not the module-level `batch_size`
        # global, so the generator honors its constructor argument.
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for j, path in enumerate(batch_input_img_paths):
            img = load_img(path, target_size=self.img_size)
            x[j] = img
        # Masks are loaded as grayscale and given a trailing channel axis.
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for j, path in enumerate(batch_target_img_paths):
            img = load_img(path, color_mode='grayscale', target_size=self.img_size)
            y[j] = np.expand_dims(img, 2)
        return x, y
        
# Build the U-Net network

def downsampling_block(input_tensor, filters):
    """Encoder block: two conv-BN-ReLU stages followed by 2x2 max pooling.

    Returns a pair ``(pooled, pre_pool)``: the pooled tensor feeds the next
    encoder level, while the pre-pooling activation is kept for the
    decoder's skip connection.
    """
    features = input_tensor
    # Two identical conv -> batch-norm -> ReLU stages.
    for _ in range(2):
        features = Conv2D(filters, kernel_size=(3, 3), padding='same')(features)
        features = BatchNormalization()(features)
        features = Activation('relu')(features)
    pooled = MaxPooling2D(pool_size=(2, 2))(features)
    return pooled, features

def upsampling_block(input_tensor, skip_tensor, filters):
    """Decoder block: 2x upsample, fuse with the encoder skip, then two convs.

    The skip tensor is center-cropped when its spatial size differs from
    the upsampled tensor (assumes the skip is at least as large — the
    usual U-Net case).
    """
    # Transposed convolution doubles the spatial resolution.
    x = Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2), padding="same")(input_tensor)
    # Spatial sizes of the upsampled tensor and the skip connection.
    _, x_height, x_width, _ = x.shape
    _, s_height, s_width, _ = skip_tensor.shape
    # Size mismatch between the skip tensor and the upsampled tensor.
    h_crop = s_height - x_height
    w_crop = s_width - x_width
    if h_crop == 0 and w_crop == 0:
        # Sizes already match; use the skip tensor as-is.
        fused_skip = skip_tensor
    else:
        # Center-crop the skip tensor so the concatenation sizes agree.
        cropping = ((h_crop // 2, h_crop - h_crop // 2),
                    (w_crop // 2, w_crop - w_crop // 2))
        fused_skip = Cropping2D(cropping=cropping)(skip_tensor)
    # Channel-wise concatenation of decoder and encoder features.
    x = Concatenate()([x, fused_skip])
    # Two identical conv -> batch-norm -> ReLU stages.
    for _ in range(2):
        x = Conv2D(filters, kernel_size=(3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x

# Build a U-Net with an encoder depth of 3
def unet(imagesize, classes, features=64, depth=3):
    """Build a U-Net segmentation model.

    Parameters
    ----------
    imagesize : tuple of int
        Spatial size (height, width) of the RGB inputs.
    classes : int
        Number of segmentation classes (one softmax channel each).
    features : int, optional
        Filters in the first encoder level; doubled at every level down.
    depth : int, optional
        Number of encoder/decoder levels.

    Returns
    -------
    keras.Model
        Maps (H, W, 3) images to (H, W, classes) per-pixel probabilities.
    """
    # BUG FIX: use the `imagesize` parameter rather than the module-level
    # `img_size` global, so the function honors its argument.
    inputs = keras.Input(shape=imagesize + (3,))
    x = inputs
    # Pre-pooling feature maps saved for the decoder's skip connections.
    skips = []
    # Encoder: each level halves the resolution and doubles the filters.
    for i in range(depth):
        x, x0 = downsampling_block(x, features)
        skips.append(x0)
        features *= 2
    # Bottleneck: two conv -> BN -> ReLU stages at the deepest resolution.
    x = Conv2D(filters=features, kernel_size=(3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=features, kernel_size=(3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Decoder: each level halves the filters and fuses the matching skip.
    for i in reversed(range(depth)):
        features //= 2
        x = upsampling_block(x, skips[i], features)
    # 1x1 conv to per-class scores, then softmax over the class axis.
    x = Conv2D(filters=classes, kernel_size=(1, 1), padding="same")(x)
    outputs = Activation('softmax')(x)
    model = keras.Model(inputs, outputs)
    return model

# CONSISTENCY FIX: pass the named constant instead of the magic literal 4,
# so the model's output channels always match `num_classes`.
model = unet(img_size, num_classes)
model.summary()

# Hold out this many samples for validation.
val_samples = 1000
# Shuffle both path lists in place with the SAME seed so every image
# stays aligned with its annotation.
for paths in (input_img_paths, target_img_paths):
    random.Random(1337).shuffle(paths)
# Training split: everything except the last `val_samples` entries.
train_input_img_paths = input_img_paths[:-val_samples]
train_target_img_paths = target_img_paths[:-val_samples]
# Validation split: the last `val_samples` entries.
val_input_img_paths = input_img_paths[-val_samples:]
val_target_img_paths = target_img_paths[-val_samples:]

# Batch generators for training and validation.
train_gen = OxfordPets(train_input_img_paths, train_target_img_paths, batch_size, img_size)
val_gen = OxfordPets(val_input_img_paths, val_target_img_paths, batch_size, img_size)

# Compile the model: masks hold integer class labels, hence the
# sparse (not one-hot) categorical cross-entropy loss.
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")

# Train, evaluating on the validation generator after each epoch.
epochs = 15
model.fit(train_gen, epochs=epochs, validation_data=val_gen)

# Rebuild the validation generator and predict per-pixel class
# probabilities for every validation batch.
val_gen = OxfordPets(val_input_img_paths, val_target_img_paths, batch_size, img_size)
val_preds = model.predict(val_gen)

# 定义预测结果显示方法
def display_mask(i):
    # 获取到第i个样本的预测结果
    mask = np.argmax(val_preds[i], axis=-1)
    # 维度调整
    mask = np.expand_dims(mask, axis=-1)
    # 转换为图像并显示
    img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))
    display(img)

# Pick one validation sample to inspect.
i = 10
# Its input photograph...
display(Image(filename=val_input_img_paths[i]))
# ...its ground-truth annotation (autocontrast for visibility)...
img = PIL.ImageOps.autocontrast(load_img(val_target_img_paths[i]))
display(img)

# ...and the mask the model predicted for it.
display_mask(i)


# def main():
#     print("Hello from unet-learn!")


# if __name__ == "__main__":
#     main()
