import os
import glob
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input
from keras.optimizers import Adam
from keras.callbacks import TensorBoard,ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
from yolo3.yolov3_body import body
from yolo3.get_ytrue import generator
from yolo3.get_loss import fn_loss

K.clear_session()

# Paths: annotation XMLs, image directory, log/checkpoint output, pretrained weights.
root = os.path.dirname(__file__)  # NOTE(review): assigned but never used below
ann_dir = os.path.join('E:/Desktop/paper_data/intermediate/cloud/ann/', "*.xml")
ann_fnames = glob.glob(ann_dir)
img_dir = 'E:/Desktop/paper_data/intermediate/cloud/img/'
log_dir = 'logs/'
path = 'yolo_weights.h5'

# The nine YOLOv3 anchor boxes as (width, height) pixel pairs, smallest first.
anchors = np.array([[10,13], [16,30], [33,23], [30,61], [62,45], [59,119], [116,90], [156,198], [373,326]])
# Output grid sizes of the three detection scales (416/8, 416/16, 416/32).
pattern_shape = [52,26,13]
# presumably 3 anchors per detection scale; only anchor_shape[1] is read below — TODO confirm against body()/generator()
anchor_shape = [3,3]
input_size = 416   # square network input resolution in pixels
batch_size = 4

classes = ['car', 'person', 'cyclist', 'truck', 'bike', 'van','bus']
# Train/validation split: 90% of the annotation files train, the rest validate.
num_train = int(len(ann_fnames)*0.9)
num_val = int(len(ann_fnames)-num_train )
# Training callbacks.
logging = TensorBoard(log_dir = log_dir)
# Every 2 epochs, save weights-only checkpoints that improve val_loss.
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                             monitor='val_loss', save_weights_only=True, save_best_only=True, period=2)
# Multiply the learning rate by 0.8 after 5 epochs without val_loss improvement.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=5, verbose=1)
# Stop training after 6 epochs without val_loss improvement.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=6, verbose=1)

# Build the YOLOv3 network: 416x416x3 input, output produced by the project-local body().
model_input = Input(shape=(input_size,input_size,3))
# anchor_shape[1] == 3 — presumably the number of anchors per detection scale; TODO confirm body()'s contract
model_output = body(model_input,anchor_shape[1],len(classes))
model = Model(model_input,model_output)
model.summary()
# model.load_weights(path,by_name=True,skip_mismatch=True)

# Optional stage 1 (currently disabled): warm up with the first 249 layers
# (the backbone) frozen at a low learning rate before full fine-tuning.
# freeze_layers = 249
# for i in range(freeze_layers): model.layers[i].trainable = False
#
# model.compile(optimizer=Adam(lr=0.00001),loss=fn_loss)
# model.fit_generator(generator(batch_size,pattern_shape,anchor_shape,classes,ann_fnames[:num_train],input_size,anchors,img_dir),
#                     steps_per_epoch=max(1,num_train//batch_size),
#                     validation_data=generator(batch_size,pattern_shape,anchor_shape,classes,ann_fnames[num_train:],input_size,anchors,img_dir),
#                     validation_steps=max(1,num_val//batch_size),
#                     epochs=10,
#                     initial_epoch=0,
#                     callbacks=[logging,checkpoint,reduce_lr,early_stopping])
# model.save_weights(log_dir+'new_weights.h5')
#
#
# freeze_layers = 252
# for i in range(freeze_layers): model.layers[i].trainable = True

# Stage 2: train the full network with the custom YOLOv3 loss.
model.compile(optimizer=Adam(lr=0.001),loss=fn_loss)
# NOTE(review): early_stopping is constructed above but not passed here —
# confirm whether omitting it from callbacks is intentional.
model.fit_generator(generator(batch_size,pattern_shape,anchor_shape,classes,ann_fnames[:num_train],input_size,anchors,img_dir),
                    steps_per_epoch=max(1,num_train//batch_size),
                    validation_data=generator(batch_size,pattern_shape,anchor_shape,classes,ann_fnames[num_train:],input_size,anchors,img_dir),
                    # FIX: num_val counts samples, but the generator yields
                    # batches of batch_size, so the step count must be divided
                    # by batch_size (mirroring steps_per_epoch). The previous
                    # value validated batch_size times too many batches/epoch.
                    validation_steps=max(1,num_val//batch_size),
                    epochs=100,
                    initial_epoch=0,
                    callbacks=[logging,checkpoint,reduce_lr])
model.save_weights(log_dir+'last_weights.h5')
