from keras.layers import Input, Conv2D, Dropout, MaxPooling2D, Concatenate, UpSampling2D,Flatten,Dense,Layer
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ModelCheckpoint, TensorBoard, LambdaCallback
from keras.losses import binary_crossentropy,binary_focal_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Model
from keras.regularizers import l1, l2
from keras.preprocessing.image import ImageDataGenerator

import os
import cv2
import json
import glob
import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def cnn(n_classes,input_shape=(255, 255, 3)):
    """Build and compile a small 3-stage CNN image classifier.

    Args:
        n_classes: Number of output units. 1 selects a sigmoid head,
            anything larger a softmax head.
        input_shape: (height, width, channels) of the input images.

    Returns:
        A compiled ``keras.Sequential`` model.

    Fix: the original always compiled with binary cross-entropy and binary
    accuracy, which is wrong for a softmax multi-class head; the loss and
    metric now follow the activation. Behavior is unchanged for the
    ``n_classes == 1`` case used elsewhere in this file.
    """
    model =  keras.Sequential([
        Input(shape=input_shape),
        Conv2D(32, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(n_classes, activation='sigmoid' if n_classes==1 else 'softmax')
    ])
    model.summary()
    # Match loss/metric to the output head: binary losses only make sense
    # for the single-unit sigmoid case.
    if n_classes == 1:
        loss, metrics = binary_crossentropy, [binary_accuracy]
    else:
        loss, metrics = 'categorical_crossentropy', ['categorical_accuracy']
    model.compile(optimizer=Adam(learning_rate=1e-3), loss=loss, metrics=metrics)
    return model
def unet(n_classes=2, input_shape=(255, 255, 3)):
    """Build and compile a U-Net style encoder/decoder for segmentation.

    Args:
        n_classes: number of output channels; 1 selects a sigmoid head,
            otherwise softmax.
        input_shape: (height, width, channels) of the input images.

    Returns:
        A compiled keras ``Model`` named ``"unet"``.

    NOTE(review): the model is compiled with binary focal cross-entropy and
    binary accuracy even when n_classes > 1 (softmax head) — confirm callers
    only use n_classes == 1.
    """
    img_input = Input(shape=input_shape)
    
    # Encoder: two 3x3 convs per stage (channel width doubling each stage),
    # followed by 2x2 max pooling.
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(img_input)
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D((2, 2), strides=2)(conv1)

    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D((2, 2), strides=2)(conv2)

    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D((2, 2), strides=2)(conv3)

    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv4)
    pool4 = MaxPooling2D((2, 2), strides=2)(conv4)
    
    # Bottleneck with dropout for regularization.
    conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # Decoder: upsample, pad to the matching encoder stage's spatial size,
    # concatenate the skip connection, then two 3x3 convs.
    # tf.pad is needed because for odd encoder sizes (e.g. 255 -> 127 -> 63)
    # UpSampling2D yields a map one pixel short of the skip tensor; the
    # missing row/column is zero-padded on the bottom/right.
    up6 = Conv2D(512, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(UpSampling2D((2, 2))(drop5))
    up6 = tf.pad(up6,[[0,0],[0,conv4.shape[1]-up6.shape[1]],[0,conv4.shape[2]-up6.shape[2]],[0,0]])
    merge6 = Concatenate(axis=-1)([up6, conv4])
    conv6 = Conv2D(512, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv6)

    up7 = Conv2D(256, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(UpSampling2D((2, 2))(conv6))
    up7 = tf.pad(up7,[[0,0],[0,conv3.shape[1]-up7.shape[1]],[0,conv3.shape[2]-up7.shape[2]],[0,0]])
    merge7 = Concatenate(axis=-1)([up7, conv3])
    conv7 = Conv2D(256, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv7)

    up8 = Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(UpSampling2D((2, 2))(conv7))
    up8 = tf.pad(up8,[[0,0],[0,conv2.shape[1]-up8.shape[1]],[0,conv2.shape[2]-up8.shape[2]],[0,0]])
    merge8 = Concatenate(axis=-1)([up8, conv2])
    conv8 = Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(conv8)

    up9 = Conv2D(64, (3, 3), activation='relu', padding='same',kernel_initializer = 'he_normal')(UpSampling2D((2, 2))(conv8))
    up9 = tf.pad(up9,[[0,0],[0,conv1.shape[1]-up9.shape[1]],[0,conv1.shape[2]-up9.shape[2]],[0,0]])
    merge9 = Concatenate(axis=-1)([up9, conv1])
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

    # Output layer: 1x1 conv producing the per-pixel class map.
    outputs = Conv2D(n_classes, (1, 1), padding='same', activation='sigmoid' if n_classes==1 else 'softmax')(conv9)
    model = Model(inputs=img_input, outputs=outputs, name="unet")
    model.summary()
    model.compile(optimizer=Adam(learning_rate=1e-3), loss=binary_focal_crossentropy, metrics=[binary_accuracy])
    return model
def integrate(n_classes,input_shape=(255, 255, 3)):
    """Chain a freshly-built U-Net into a freshly-built CNN classifier.

    The U-Net produces an ``n_classes``-channel mask which is fed into a
    CNN expecting a single-channel input, so this wiring only lines up
    when ``n_classes == 1`` — TODO confirm with callers.

    Args:
        n_classes: number of classes forwarded to ``unet()`` / ``cnn()``.
        input_shape: (height, width, channels) of the raw input images.

    Returns:
        A compiled keras ``Model`` mapping raw images to class predictions.

    Bug fixed: the original toggled ``trainable`` on the *global*
    ``cnn_model`` (defined only when the module runs as a script), which
    raises NameError on import and freezes a model unrelated to the CNN
    instance actually embedded here. The local sub-model is frozen instead.
    """
    input1 = Input(shape=input_shape)
    seg_model = unet(n_classes, input_shape)
    cls_model = cnn(n_classes, (*input_shape[:2], 1))
    output1 = cls_model(seg_model(input1))
    integrated_model = Model(inputs=input1, outputs=output1)
    # Freeze the classifier while compiling so only the U-Net half of the
    # combined model is trainable.
    cls_model.trainable = False
    integrated_model.summary()
    integrated_model.compile(optimizer=Adam(learning_rate=1e-3), loss=binary_crossentropy, metrics=[binary_accuracy])
    # Restore trainability so the standalone classifier can still be trained.
    cls_model.trainable = True
    return integrated_model
    
def generator(image_directory,target_size,batch_size,dict_data,seed,image_path,mask_path):
    """Create three synchronized directory iterators over the dataset.

    All three streams share the same augmentation parameters and seed, so
    image/mask batches stay spatially aligned.

    Args:
        image_directory: root directory holding the class sub-folders.
        target_size: (height, width) to resize every image to.
        batch_size: samples per batch.
        dict_data: keyword arguments for ``ImageDataGenerator``.
        seed: shared shuffling/augmentation seed.
        image_path: sub-folder with the input frames.
        mask_path: sub-folder with the segmentation masks.

    Returns:
        A ``zip`` of (image batches, mask batches, classification batches).
    """
    image_augmenter = ImageDataGenerator(**dict_data)
    mask_augmenter = ImageDataGenerator(**dict_data)
    classify_augmenter = ImageDataGenerator(**dict_data)

    # RGB input frames.
    image_stream = image_augmenter.flow_from_directory(
        image_directory,
        classes=[image_path],
        target_size=target_size,
        batch_size=batch_size,  # keep batches small
        seed=seed)

    # Single-channel ground-truth masks, aligned with image_stream by seed.
    mask_stream = mask_augmenter.flow_from_directory(
        image_directory,
        classes=[mask_path],
        color_mode='grayscale',
        target_size=target_size,
        batch_size=batch_size,  # keep batches small
        seed=seed)

    # Mixed frames+masks with binary labels for the CNN classifier.
    classify_stream = classify_augmenter.flow_from_directory(
        image_directory,
        classes=[image_path,mask_path],
        color_mode='grayscale',
        target_size=target_size,
        batch_size=batch_size,  # keep batches small
        class_mode="binary",
        seed=seed)

    return zip(image_stream, mask_stream, classify_stream)
def dataset_cnn(data):
    """Yield (image, label) pairs taken from the third (classifier) stream."""
    for batch in data:
        classifier_batch = batch[2]
        yield classifier_batch[0], classifier_batch[1]
def dataset_unet(data):
    """Yield (image, mask) pairs, dropping every 20th batch (i = 0, 20, ...)."""
    for index, batch in enumerate(data):
        if index % 20:  # skip indices divisible by 20
            yield batch[0][0], batch[1][0]
def dataset_integrate(data):
    """Yield (raw image, classifier label) pairs for end-to-end training."""
    for batch in data:
        yield batch[0][0], batch[2][1]
            
if __name__ == "__main__":
    # --- dataset selection ------------------------------------------------
    # NOTE(review): the first path/name pair is immediately overwritten by
    # the TuSimple pair below; the assignments are kept as manual toggles.
    path = 'deep_net\\video'
    image_path,mask_path='image','label'    
    path = 'E:/Opensource/model/tusimple_preprocessed/training'
    #path = 'unet\\data\\membrane\\train'
    image_path,mask_path='frames','lane-masks'
    # Use the first training image to fix the input resolution of all models.
    # Windows-style '\\' separators are assumed — TODO confirm portability.
    img = cv2.imread(glob.glob(path+"\\"+image_path+"\\*.jpg")[0])
    unet_model = unet(1,img.shape)
    cnn_model = cnn(1,(*img.shape[:2],1))
    integrated_model = integrate(1,img.shape)
    # Training branch: flip `if False` to True to train instead of predict.
    if False:
        seed,batch_size = 1,2
        # Shared augmentation settings; the common seed keeps the image and
        # mask streams aligned inside generator().
        param = dict(rescale=1./255,rotation_range=10,width_shift_range=0.2,height_shift_range=0.2,
                        shear_range=0.2,zoom_range=0.2,horizontal_flip=True,validation_split=0.2,fill_mode='constant')
        data = generator(path,img.shape[:2], batch_size, param, seed,image_path,mask_path)
        if 0:
            # Train the standalone CNN classifier (image-vs-mask batches).
            cnn_model.fit(dataset_cnn(data), steps_per_epoch=100, epochs=3, callbacks=[
                    TensorBoard('deep_net\\logs',write_images=True,histogram_freq=1,write_graph=False),
                    ModelCheckpoint("deep_net\\param\\cnn.keras",verbose=1),
                ])
        if 1:
            # Train the U-Net segmentation model.
            #unet_model.load_weights("deep_net\\param\\unet.keras")
            unet_model.fit(dataset_unet(data), steps_per_epoch=80, epochs=3, callbacks=[
                    TensorBoard('deep_net\\logs',write_images=True,histogram_freq=1,write_graph=False),
                    ModelCheckpoint("deep_net\\param\\unet.keras",verbose=1),
                ])
        if 0:
            # Fine-tune the combined model.
            # NOTE(review): loading weights into the global `cnn_model` does
            # not affect `integrated_model`, which embeds its own CNN
            # instance built inside integrate() — verify this is intended.
            cnn_model.load_weights("deep_net\\param\\cnn.keras")
            integrated_model.load_weights("deep_net\\param\\intergrated.keras")
            integrated_model.fit(dataset_integrate(data), steps_per_epoch=50, epochs=10, callbacks=[
                        TensorBoard('deep_net\\logs',write_images=True,histogram_freq=1,write_graph=False),
                        ModelCheckpoint("deep_net\\param\\intergrated.keras",verbose=1),
                    ])
    else:
        # Inference branch: load trained U-Net weights and visualize masks.
        if 1:
            #integrated_model.load_weights("deep_net\\param\\intergrated.keras")
            #unet_model.load_weights("D:\\BaiduNetdiskDownload\\unet1.keras")
            unet_model.load_weights("deep_net\\param\\unet.keras")
            #cnn_model.load_weights("deep_net\\param\\cnn.keras")
        for file in glob.glob(path+"\\"+image_path+"\\*.jpg"):
            # Normalize to [0, 1] and add a batch dimension before predicting.
            new_img = cv2.imread(file)/255.
            new_img = new_img.reshape(1,*new_img.shape[:3])
            new_img = unet_model.predict(new_img)
            # Rough foreground/background pixel counts at a 0.1 threshold.
            print(len(new_img[new_img>0.1]),len(new_img[new_img<=0.1]))
            #plt.imshow(new_img[0,:,:,:])
            #plt.show()
            cv2.imshow("",new_img[0,:,:,:])
            cv2.waitKey(1)
        