from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Lambda, Input, concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import ELU
from tensorflow.keras.optimizers import Adam, SGD, Adamax, Nadam
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger, EarlyStopping
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.preprocessing import image

#from keras_tqdm import TQDMNotebookCallback

import json
import os
import numpy as np
import pandas as pd
from Generator import DriveDataGenerator
from Cooking import checkAndCreateDir
import h5py
from PIL import Image, ImageDraw
import math
import matplotlib.pyplot as plt

# Locations of the pre-processed ("cooked") data and of the trained-model output.
COOKED_DATA_DIR = 'data_cooked/'
MODEL_OUTPUT_DIR = 'model'

# Open the cooked HDF5 datasets read-only.
train_dataset = h5py.File(os.path.join(COOKED_DATA_DIR, 'train.h5'), 'r')
test_dataset = h5py.File(os.path.join(COOKED_DATA_DIR, 'test.h5'), 'r')
eval_dataset = h5py.File(os.path.join(COOKED_DATA_DIR, 'eval.h5'), 'r')

# Example counts come from the leading dimension of each 'image' dataset.
num_train_examples = train_dataset['image'].shape[0]
num_test_examples = test_dataset['image'].shape[0]
num_eval_examples = eval_dataset['image'].shape[0]

batch_size = 32


# Generator normalizes pixels to [0, 1], jitters brightness and randomly
# flips images horizontally.
data_generator = DriveDataGenerator(rescale=1. / 255, brightness_range=(0, 0.4), horizontal_flip=True)
# Both flows use the same ROI crop; zero_drop_percentage=0.95 presumably
# discards most near-zero-steering samples (see DriveDataGenerator -- confirm).
train_generator = data_generator.flow(
    train_dataset['image'],
    train_dataset['previous_state'],
    train_dataset['label'],
    batch_size=batch_size,
    zero_drop_percentage=0.95,
    roi=[76, 135, 0, 255])
eval_generator = data_generator.flow(
    eval_dataset['image'],
    eval_dataset['previous_state'],
    eval_dataset['label'],
    batch_size=batch_size,
    zero_drop_percentage=0.95,
    roi=[76, 135, 0, 255])

def draw_image_with_label(img, label, prediction=None):
    """Display an image with steering angle(s) drawn as lines from its bottom-center.

    The ground-truth angle is drawn in red; if a model prediction is given
    it is drawn in blue as well. The image is shown with matplotlib.

    :param img: image array (H, W, C) as produced by the data generator.
    :param label: ground-truth steering value; multiplied by 0.69 to get an
        angle in radians (presumably the vehicle's steering range -- confirm
        against the data-cooking step).
    :param prediction: optional predicted steering value, same scale as label.
    """
    steering_scale = 0.69  # label units -> radians (assumed; see docstring)
    line_length = 50
    line_thickness = 3
    label_line_color = (255, 0, 0)       # red: ground truth
    prediction_line_color = (0, 0, 255)  # blue: prediction

    pil_image = image.array_to_img(img, K.image_data_format(), scale=True)
    print(f'Actual Steering Angle = {label}')

    draw_image = pil_image.copy()
    image_draw = ImageDraw.Draw(draw_image)
    # Lines are anchored at the bottom-center of the image.
    first_point = (int(img.shape[1] / 2), img.shape[0])

    def _endpoint(theta):
        # Project a line of fixed length from the anchor at angle theta.
        return (int(img.shape[1] / 2) + (line_length * math.sin(theta)),
                int(img.shape[0] - (line_length * math.cos(theta))))

    image_draw.line([first_point, _endpoint(label * steering_scale)],
                    fill=label_line_color, width=line_thickness)

    if prediction is not None:
        print(f'Prediction Steering Angle = {prediction}')
        print(f'L1 error: {abs(prediction - label)}')
        image_draw.line([first_point, _endpoint(prediction * steering_scale)],
                        fill=prediction_line_color, width=line_thickness)

    del image_draw
    plt.imshow(draw_image)
    plt.show()

# Pull one batch from the training generator and visualize a few samples.
# Each batch is ([images, previous_state], labels); the "test_data" name
# below actually holds the label array.
sample_batch_train_data, sample_batch_test_data = next(train_generator)
for i in range(3):
    draw_image_with_label(sample_batch_train_data[0][i], sample_batch_test_data[i])



# Per-sample input shapes: drop the leading batch dimension.
image_input_shape = sample_batch_train_data[0].shape[1:]
state_input_shape = sample_batch_train_data[1].shape[1:]
activation = 'relu'

# ---- Model definition -------------------------------------------------------
# Image branch: three conv/pool stages, then flatten + dropout.
img_input = Input(shape=image_input_shape)

img_stack = Conv2D(16, 3, name='conv0', padding='same', activation=activation)(img_input)
img_stack = MaxPooling2D(pool_size=2)(img_stack)
img_stack = Conv2D(32, 3, activation=activation, padding='same', name='conv1')(img_stack)
img_stack = MaxPooling2D(pool_size=2)(img_stack)
img_stack = Conv2D(32, 3, activation=activation, padding='same', name='conv2')(img_stack)
img_stack = MaxPooling2D(pool_size=2)(img_stack)
img_stack = Flatten()(img_stack)
img_stack = Dropout(rate=0.2)(img_stack)

# Inject the vehicle-state input alongside the flattened image features.
state_input = Input(shape=state_input_shape)
merged = concatenate([img_stack, state_input])

# Dense head regressing a single steering value (linear output).
merged = Dense(64, activation=activation, name='dense0')(merged)
merged = Dropout(0.2)(merged)
merged = Dense(10, activation=activation, name='dense1')(merged)
merged = Dropout(0.2)(merged)
merged = Dense(1, name='output')(merged)

# FIX: the `lr` keyword is deprecated (and removed in recent tf.keras);
# use `learning_rate` instead. Note the optimizer is Nadam despite the name.
adam = Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model = Model(inputs=[img_input, state_input], outputs=merged)
model.compile(optimizer=adam, loss='mse')
model.summary()


# ---- Keras callbacks --------------------------------------------------------
# Halve the learning rate (down to min_lr) when validation loss plateaus.
plateau_cb = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                               patience=3, min_lr=0.0001, verbose=1)
# Checkpoint filename embeds epoch and validation loss; with
# save_best_only=True only improvements in val_loss are written.
checkpoint_filepath = os.path.join(MODEL_OUTPUT_DIR, 'models', '{0}_model.{1}-{2}.h5'.format('model', '{epoch:02d}', '{val_loss:.7f}'))
checkAndCreateDir(checkpoint_filepath)
checkpoint_callback = ModelCheckpoint(checkpoint_filepath, save_best_only=True, verbose=1)
csv_cb = CSVLogger(os.path.join(MODEL_OUTPUT_DIR, 'training_log.csv'))
# Stop training once validation loss has not improved for 10 epochs,
# to avoid overfitting.
early_stopping_cb = EarlyStopping(monitor='val_loss', patience=10, verbose=1)

callbacks = [plateau_cb, csv_cb, checkpoint_callback, early_stopping_cb]

# FIX: Model.fit accepts generators directly; fit_generator is deprecated
# and removed in recent TensorFlow releases.
history = model.fit(train_generator,
                    steps_per_epoch=num_train_examples // batch_size,
                    epochs=500,
                    callbacks=callbacks,
                    validation_data=eval_generator,
                    validation_steps=num_eval_examples // batch_size,
                    verbose=2)


# Visual sanity check: predict on the cached sample batch and overlay the
# predicted (blue) vs. actual (red) steering angles.
predictions = model.predict([sample_batch_train_data[0], sample_batch_train_data[1]])
for i in range(5):
    draw_image_with_label(sample_batch_train_data[0][i],
                          sample_batch_test_data[i],
                          predictions[i])