# -*- coding: utf-8 -*-
# Automatically generates training sets and models.
# Author: Dingchao
# Date: 2019-09-19
# Based on TensorFlow and Keras.



import os
import random
from math import ceil

import cv2 as cv
import numpy as np
from lxml import etree, objectify
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger
from keras.optimizers import Adam

from model import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize



class Sets_Generation():
    """Generate a synthetic PASCAL-VOC-style detection dataset and train an
    SSD7 model on it.

    The input directory must contain one background image (``*.jpg`` or
    ``*.png``) and one sub-directory of object images whose file names
    (minus a 3-character extension) are the class names.
    """

    def __init__(self, pictures_dir):
        """Load the background image and collect object images / class names.

        pictures_dir: folder holding one background picture and one
        sub-directory of object pictures named after their target class.
        """
        print('******棋类app识别模型生成器*******\n'
              '********Author:Dingchao**********\n'
              '********Time:20190921************\n'
              '只需把背景和目标放在对应文件夹即可\n')

        input('Input anykey to continue!')
        self.E = objectify.ElementMaker(annotate=False)
        self.classlist = []
        self.objs = []  # stays empty if no object sub-directory is found
        for file in os.listdir(pictures_dir):
            if file.endswith('jpg') or file.endswith('png'):
                bg = cv.imread(pictures_dir + '/' + file)
                # Shrink the background 4x in both dimensions.
                self.bg = cv.resize(bg, (bg.shape[1] // 4, bg.shape[0] // 4))
            elif os.path.isdir(pictures_dir + '/' + file):
                self.objpath = pictures_dir + '/' + file
                self.objs = os.listdir(self.objpath)  # object file names
        for obj in self.objs:
            # Strip '.jpg'/'.png' (3-character extension assumed) -> class name.
            cls = obj[:-4]
            if cls not in self.classlist:
                self.classlist.append(cls)
        self.classlist.insert(0, 'background')
        print(self.classlist)

    def xml_base(self, path, filename):
        """Return the skeleton <annotation> element for one generated image.

        path: dataset root (expected to end with '/'); filename: image stem.
        """
        base_tree = self.E.annotation(
            self.E.folder('JPE'),
            self.E.filename(filename + '.jpg'),
            self.E.path(path + 'JPE/' + filename + '.jpg'),
            self.E.source(
                self.E.database('The VOC Database'),
                self.E.annotation('PASCAL VOC'),
            ),
            self.E.size(
                self.E.width(self.bg.shape[1]),
                self.E.height(self.bg.shape[0]),
                self.E.depth(self.bg.shape[2])
            ),
            self.E.segmented(0), )
        return base_tree

    def add_objs(self, add_num, filename, path):
        """Paste `add_num` randomly chosen objects onto a copy of the
        background and write the composed image (JPE/) plus its VOC
        annotation (Anno/).
        """
        bg = self.bg.copy()
        base_tree = self.xml_base(path, filename)

        for _ in range(add_num):
            add_obj = random.choice(self.objs)
            img = cv.imread(self.objpath + '/' + add_obj)
            add_img = cv.resize(img, (img.shape[1] // 4, img.shape[0] // 4))
            name = add_obj[:-4]
            obj_h, obj_w = add_img.shape[:2]
            # NOTE(review): assumes every scaled object fits inside the
            # background; random.randint raises otherwise.
            ymin = random.randint(0, bg.shape[0] - obj_h)
            xmin = random.randint(0, bg.shape[1] - obj_w)
            # Bug fix: width and height were swapped when computing
            # xmax/ymax, producing wrong boxes for non-square objects.
            ymax = ymin + obj_h
            xmax = xmin + obj_w
            # Near-white pixels (all three channels >= 220) are treated as
            # transparent; copy only the remaining object pixels (vectorized
            # replacement of the original per-pixel Python loop).
            mask = (add_img < 220).any(axis=2)
            roi = bg[ymin:ymax, xmin:xmax]
            roi[mask] = add_img[mask]
            add_tree = self.E.object(
                self.E.name(name),
                self.E.difficult(0),
                self.E.pose('Unspecified'),
                self.E.truncated(0),
                self.E.bndbox(
                    self.E.xmin(xmin),
                    self.E.ymin(ymin),
                    self.E.xmax(xmax),
                    self.E.ymax(ymax)
                ))
            base_tree.append(add_tree)
        etree.ElementTree(base_tree).write(path + 'Anno/' + filename + ".xml", pretty_print=True)
        cv.imwrite(path + 'JPE/' + filename + '.jpg', bg)

    def generate(self, save_path, need_num):
        """Generate `need_num` training images, each with 1-10 random
        objects, under save_path ('JPE/' images, 'Anno/' annotations).
        """
        print('正在生成训练图片！请稍等~')
        # Robustness: make sure the output folders exist before writing.
        os.makedirs(save_path + 'JPE', exist_ok=True)
        os.makedirs(save_path + 'Anno', exist_ok=True)
        for gen_num in range(need_num):
            filename = str(gen_num).zfill(6)
            add_num = random.randint(1, 10)
            self.add_objs(add_num, filename, save_path)
            if (gen_num + 1) % 100 == 0:
                print('已生成', gen_num + 1, '张图片！')
        print('所需数据集已经保存完毕')

    def sets_partitio(self, save_path):
        """Split the generated samples into trainval/train/val/test lists
        (75% trainval, of which 75% train) written to Imag/Main/*.txt.
        """
        trainval_percent = 0.75
        train_percent = 0.75
        xmlfilepath = save_path + 'Anno'
        txtsavepath = save_path + 'Imag/Main'
        os.makedirs(txtsavepath, exist_ok=True)  # robustness: create dir
        total_xml = os.listdir(xmlfilepath)
        num = len(total_xml)
        indices = range(num)  # renamed: don't shadow the builtin `list`
        tv = int(num * trainval_percent)
        tr = int(tv * train_percent)
        trainval = random.sample(indices, tv)
        train = random.sample(trainval, tr)
        # Sets give O(1) membership tests inside the loop below.
        trainval_set = set(trainval)
        train_set = set(train)

        # Context managers guarantee the files are closed even on error.
        with open(txtsavepath + '/trainval.txt', 'w') as ftrainval, \
                open(txtsavepath + '/test.txt', 'w') as ftest, \
                open(txtsavepath + '/train.txt', 'w') as ftrain, \
                open(txtsavepath + '/val.txt', 'w') as fval:
            for i in indices:
                name = total_xml[i][:-4] + '\n'
                if i in trainval_set:
                    ftrainval.write(name)
                    if i in train_set:
                        ftrain.write(name)
                    else:
                        fval.write(name)
                else:
                    ftest.write(name)
        print('数据集划分已经完成！')

    def train_model(self, save_path, epoch_num, steps_per_epoch):
        """Build and train an SSD7 detector on the generated dataset.

        Returns the Keras `History` object produced by training.
        """
        print('正在训练识别模型！这个过程会比较漫长，请耐心等待~')
        img_height = self.bg.shape[0]
        img_width = self.bg.shape[1]
        img_channels = self.bg.shape[2]
        intensity_mean = 127.5
        intensity_range = 127.5
        # Bug fix: ssd_keras expects the number of POSITIVE classes; the
        # background class (classlist[0]) is added internally by the model
        # and by SSDInputEncoder, so it must not be counted here.
        n_classes = len(self.classlist) - 1
        scales = [0.08, 0.09, 0.10, 0.11, 0.13]
        aspect_ratios = [0.6, 1.0, 1.6]
        two_boxes_for_ar1 = True
        steps = None
        offsets = None
        clip_boxes = False
        variances = [1.0, 1.0, 1.0, 1.0]
        normalize_coords = True
        K.clear_session()

        model = build_model(image_size=(img_height, img_width, img_channels),
                            n_classes=n_classes,
                            mode='training',
                            l2_regularization=0.0005,
                            scales=scales,
                            aspect_ratios_global=aspect_ratios,
                            aspect_ratios_per_layer=None,
                            two_boxes_for_ar1=two_boxes_for_ar1,
                            steps=steps,
                            offsets=offsets,
                            clip_boxes=clip_boxes,
                            variances=variances,
                            normalize_coords=normalize_coords,
                            subtract_mean=intensity_mean,
                            divide_by_stddev=intensity_range)
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0)

        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

        model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
        train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None,
                                      labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'))
        val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None,
                                    labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'))
        images_dir = save_path + 'JPE/'
        annotations_dir = save_path + 'Anno/'
        train_labels_filename = save_path + 'Imag/Main/train.txt'
        # Consistency fix: the original had an extra '/' here, unlike the
        # train path above.
        val_labels_filename = save_path + 'Imag/Main/val.txt'
        classes = self.classlist
        train_dataset.parse_xml(images_dirs=[images_dir],
                                image_set_filenames=[train_labels_filename],
                                annotations_dirs=[annotations_dir],
                                classes=classes,
                                include_classes='all',
                                exclude_truncated=False,
                                exclude_difficult=False,
                                ret=False)

        val_dataset.parse_xml(images_dirs=[images_dir],
                              image_set_filenames=[val_labels_filename],
                              annotations_dirs=[annotations_dir],
                              classes=classes,
                              include_classes='all',
                              exclude_truncated=False,
                              exclude_difficult=False,
                              ret=False)
        train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',
                                          resize=False,
                                          variable_image_size=True,
                                          verbose=True)

        val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',
                                        resize=False,
                                        variable_image_size=True,
                                        verbose=True)

        train_dataset_size = train_dataset.get_dataset_size()
        val_dataset_size = val_dataset.get_dataset_size()

        print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
        print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))

        batch_size = 20

        # Photometric + geometric augmentation applied only to the
        # training stream.
        data_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),
                                                                    random_contrast=(0.5, 1.8, 0.5),
                                                                    random_saturation=(0.5, 1.8, 0.5),
                                                                    random_hue=(18, 0.5),
                                                                    random_flip=0.5,
                                                                    random_translate=((0.03, 0.5), (0.03, 0.5), 0.5),
                                                                    random_scale=(0.5, 2.0, 0.5),
                                                                    n_trials_max=3,
                                                                    clip_boxes=True,
                                                                    overlap_criterion='area',
                                                                    bounds_box_filter=(0.3, 1.0),
                                                                    bounds_validator=(0.5, 1.0),
                                                                    n_boxes_min=1,
                                                                    background=(0, 0, 0))

        # Spatial dimensions of the four SSD7 predictor layers, needed by
        # the input encoder to build matching anchor boxes.
        predictor_sizes = [model.get_layer('classes4').output_shape[1:3],
                           model.get_layer('classes5').output_shape[1:3],
                           model.get_layer('classes6').output_shape[1:3],
                           model.get_layer('classes7').output_shape[1:3]]

        ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                            img_width=img_width,
                                            n_classes=n_classes,
                                            predictor_sizes=predictor_sizes,
                                            scales=scales,
                                            aspect_ratios_global=aspect_ratios,
                                            two_boxes_for_ar1=two_boxes_for_ar1,
                                            steps=steps,
                                            offsets=offsets,
                                            clip_boxes=clip_boxes,
                                            variances=variances,
                                            matching_type='multi',
                                            pos_iou_threshold=0.5,
                                            neg_iou_limit=0.3,
                                            normalize_coords=normalize_coords)

        train_generator = train_dataset.generate(batch_size=batch_size,
                                                 shuffle=True,
                                                 transformations=[data_augmentation_chain],
                                                 label_encoder=ssd_input_encoder,
                                                 returns={'processed_images',
                                                          'encoded_labels'},
                                                 keep_images_without_gt=False)

        val_generator = val_dataset.generate(batch_size=batch_size,
                                             shuffle=True,
                                             transformations=[],
                                             label_encoder=ssd_input_encoder,
                                             returns={'processed_images',
                                                      'encoded_labels'},
                                             keep_images_without_gt=False)

        model_checkpoint = ModelCheckpoint(filepath='ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           save_weights_only=False,
                                           mode='auto',
                                           period=1)

        csv_logger = CSVLogger(filename='ssd7_training_log.csv',
                               separator=',',
                               append=True)

        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.0,
                                       patience=7,
                                       verbose=1)

        reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.2,
                                                 patience=10,
                                                 verbose=1,
                                                 min_delta=0.001,
                                                 cooldown=0.,
                                                 min_lr=0.00001)

        callbacks = [model_checkpoint,
                     csv_logger,
                     early_stopping,
                     reduce_learning_rate]
        initial_epoch = 0
        final_epoch = epoch_num
        history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=steps_per_epoch,
                                      epochs=final_epoch,
                                      callbacks=callbacks,
                                      validation_data=val_generator,
                                      validation_steps=ceil(val_dataset_size / batch_size),
                                      initial_epoch=initial_epoch)

        print('模型训练已经完成，查阅当前目录下的.h5文件吧~')
        return history


if __name__ == '__main__':
    # Pipeline configuration: dataset root, how many images to synthesize,
    # and the training schedule.
    voc_root = '/home/dingchao/VOC/'
    image_count = 12
    training_epochs = 100
    batches_per_epoch = 100

    # Build the dataset, split it, then train the detector.
    builder = Sets_Generation(voc_root + 'picture')
    builder.generate(voc_root, image_count)
    builder.sets_partitio(voc_root)
    builder.train_model(voc_root, training_epochs, batches_per_epoch)

