# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.keras import layers
import configparser
import tensorflow as tf
import numpy as np
import random
import sys
import re
import os


def create_graph(scalar_N, conv1d_P, conv2d_P,
                 cnn1d_filters, cnn1d_filter_height, cnn1d_pool_rates,
                 cnn2d_filters, cnn2d_filter_height, cnn2d_filter_width,
                 cnn2d_pool_rates_height, cnn2d_pool_rates_width,
                 K, dlambda, ns_hidden):
    """
    Build the mixed network: two convolutional branches plus a fully
    connected head, merged into one compiled tf.keras.Model.

    Convolutional part:
    @para scalar_N  number of scalar features, fed straight into the FNN
    @para conv1d_P  1-D conv input spec: [length, 1, channels]
    @para conv2d_P  2-D conv input spec: [height, width, channels]
    Fully connected part:
    @para K         output vector dimension
    @para dlambda   L2 regularization factor, e.g. 0.00001
    @para ns_hidden [units of 1st hidden layer, units of 2nd, ..., last]
    @return dict {"graph": compiled tf.keras.Model}; the sub-models can be
            reached through the model's layers
    """
    ns_array = ns_hidden[:]
    # The output layer is a linear combination (no activation) appended
    # after the hidden layers, so the model can emit any value range.
    hidden_layer_size = len(ns_array)
    cnn1d_pool_times = len(cnn1d_pool_rates)
    cnn2d_pool_times = len(cnn2d_pool_rates_height)
    #--------------------------------------------------------------
    # 1-D convolution branch: Conv1D/MaxPooling1D pairs, then Flatten.
    model_cnn1 = tf.keras.Sequential()
    for idx in range(cnn1d_pool_times):
        model_cnn1.add(layers.Conv1D(
            cnn1d_filters[idx],
            kernel_size=cnn1d_filter_height[idx],
            strides=1, activation=tf.nn.leaky_relu))
        model_cnn1.add(layers.MaxPooling1D(
            pool_size=cnn1d_pool_rates[idx], strides=cnn1d_pool_rates[idx]))
    model_cnn1.add(layers.Flatten())

    # 2-D convolution branch: Conv2D/MaxPooling2D pairs, then Flatten.
    model_cnn2 = tf.keras.Sequential()
    for idx in range(cnn2d_pool_times):
        model_cnn2.add(layers.Conv2D(
            cnn2d_filters[idx],
            kernel_size=(cnn2d_filter_height[idx], cnn2d_filter_width[idx]),
            strides=1, activation=tf.nn.leaky_relu))
        model_cnn2.add(layers.MaxPooling2D(
            pool_size=(
                cnn2d_pool_rates_height[idx], cnn2d_pool_rates_width[idx]),
            strides=(
                cnn2d_pool_rates_height[idx], cnn2d_pool_rates_width[idx])))
    model_cnn2.add(layers.Flatten())  # flatten for the dense head

    # Fully connected head: leaky-ReLU hidden layers + linear output.
    model_fnn = tf.keras.Sequential()
    for idx in range(hidden_layer_size):
        model_fnn.add(layers.Dense(
            ns_array[idx], activation=tf.nn.leaky_relu,
            kernel_regularizer=tf.keras.regularizers.l2(dlambda)))
    model_fnn.add(layers.Dense(
        K, activation=None,
        kernel_regularizer=tf.keras.regularizers.l2(dlambda)))

    # Network inputs; the scalar features bypass both conv branches.
    input_scalar = tf.keras.Input(shape=(scalar_N,))
    input_1d = tf.keras.Input(shape=(conv1d_P[0], conv1d_P[2]))
    input_2d = tf.keras.Input(shape=(conv2d_P[0], conv2d_P[1], conv2d_P[2]))

    data_1d = model_cnn1(input_1d)
    data_2d = model_cnn2(input_2d)
    combined = tf.keras.layers.concatenate([input_scalar, data_1d, data_2d])
    final_out = model_fnn(combined)

    model_root = tf.keras.Model(
        inputs=[input_scalar, input_1d, input_2d], outputs=final_out)
    # NOTE(review): `lr` is a deprecated alias of `learning_rate`, and
    # 'accuracy' is a dubious metric for MSE regression — both kept as-is
    # for behavioral compatibility; confirm before changing.
    model_root.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
                       loss=tf.keras.losses.MeanSquaredError(),
                       metrics=['accuracy'])
    print('CNN1:')
    model_cnn1.summary()
    print('CNN2:')
    model_cnn2.summary()
    print('FNN:')
    model_fnn.summary()
    print('MIXNN:')
    model_root.summary()

    return {"graph": model_root}


def stringlist2array(string):
    """
    Parse a delimiter-separated list of integers from a config value.

    Whitespace, commas and double quotes all act as separators,
    e.g. '16,16,13' or '"16 16 13"' -> [16, 16, 13].

    @para string  raw config string
    @return list of ints (empty list for a blank string)
    """
    # Raw string fixes the invalid-escape warning of the old '[\s,\"]'
    # pattern; '+' collapses runs of separators so fewer empty tokens
    # are produced (they are filtered out either way).
    tokens = re.split(r'[\s,"]+', string)
    return [int(token) for token in tokens if token != '']


def read_config_file(trainning_task_file):
    """
    Read the training parameters from an ini-style config file.

    Returns a dict whose main entries are:
       key               meaning                        config path
    1. scalar_N          scalar feature count           fnn/scalar_n
    2. K                 output vector size             fnn/output_nodes
    3. dlambda           regularization factor (0.001)  fnn/lambda
    4. ns_array          hidden layer sizes, list       fnn/hidden_layer_size
    5. file_deal_times   training passes over the dir   performance/file_deal_times
    6. trunk             samples per training batch     performance/trunk
    7. iterate_times     epochs per batch               performance/iterate_times
    plus the cnn1d/cnn2d geometry, filter and pooling settings.
    """
    #reading config file
    config = configparser.ConfigParser()
    config.read(trainning_task_file)
    scalar_n = int(config['fnn']['scalar_n'])   # input scalar count
    K = int(config['fnn']['output_nodes'])      # output vector size
    lamda = float(config['fnn']['lambda'])
    # hidden layer sizes: string split on ",", e.g. "16,16,13"
    ns_array = stringlist2array(config['fnn']['hidden_layer_size'])

    file_deal_times = int(config['performance']['file_deal_times'])
    trunk = int(config['performance']['trunk'])
    iterate_times = int(config['performance']['iterate_times'])

    cnn1d_height = int(config['cnn1d']['height'])
    cnn1d_channels = int(config['cnn1d']['channels'])
    cnn1d_filters = stringlist2array(config['cnn1d']['filters'])
    cnn1d_filter_height = stringlist2array(config['cnn1d']['filter_height'])
    cnn1d_pool_rates = stringlist2array(config['cnn1d']['pool_rates'])

    cnn2d_height = int(config['cnn2d']['height'])
    cnn2d_width = int(config['cnn2d']['width'])
    cnn2d_channels = int(config['cnn2d']['channels'])
    cnn2d_filters = stringlist2array(config['cnn2d']['filters'])
    cnn2d_filter_height = stringlist2array(config['cnn2d']['filter_height'])
    cnn2d_filter_width = stringlist2array(config['cnn2d']['filter_width'])
    cnn2d_pool_rates_height = stringlist2array(config['cnn2d']
                                               ['pool_rates_height'])
    cnn2d_pool_rates_width = stringlist2array(config['cnn2d']
                                              ['pool_rates_width'])
    # BUGFIX: message typo ("red" -> "read")
    print("Config file read from disk ok.")
    sys.stdout.flush()

    return {"scalar_N": scalar_n,
            "cnn1d": [cnn1d_height, 1, cnn1d_channels],
            "cnn2d": [cnn2d_height, cnn2d_width, cnn2d_channels],
            "cnn1d_filters": cnn1d_filters,
            "cnn1d_filter_height": cnn1d_filter_height,
            "cnn1d_pool_rates": cnn1d_pool_rates,
            "cnn2d_filters": cnn2d_filters,
            "cnn2d_filter_height": cnn2d_filter_height,
            "cnn2d_filter_width": cnn2d_filter_width,
            "cnn2d_pool_rates_height": cnn2d_pool_rates_height,
            "cnn2d_pool_rates_width": cnn2d_pool_rates_width,
            "K": K, "dlambda": lamda, "ns_array": ns_array,
            "file_deal_times": file_deal_times, "trunk": trunk,
            "iterate_times": iterate_times}


def init_graph(graph, sess):
    """
    Initialize all variables of `graph` inside session `sess`.

    The graph is the structure; the session is the TF context that runs it.
    NOTE(review): this uses the TF1 `tf.global_variables_initializer` API,
    while the rest of this module builds tf.keras models — presumably a
    legacy helper; confirm it is still called anywhere.
    """
    with graph.as_default():
        # build the initializer op for every global variable of this graph
        init = tf.global_variables_initializer()
        sess.run(init)          # Very important: must run before variable reads


def readnext_train_batch(strpath, n_scalars, cnn1_size, cnn2_size, K):
    """
    Randomly draw `trunk` sample groups from a directory.

    Each sample is a set of float64 files sharing one base name:
      .scalar  scalar features      (length n_scalars)
      .1d      1-D conv input       (height * channels values)
      .2d      2-D conv input       (height * width * channels values)
      .train   target vector        (length K)

    @para strpath    directory containing the sample files
    @para n_scalars  number of scalar features per sample
    @para cnn1_size  [trunk, height, channels] of the 1-D conv input
    @para cnn2_size  [trunk, height, width, channels] of the 2-D conv input
    @para K          output vector dimension
    @return [scalar_data, cnn1_data, cnn2_data, y_data] (float32 arrays),
            or [] when the trunk sizes disagree or the directory is empty
    """
    if cnn1_size[0] != cnn2_size[0]:
        return []
    trunk = cnn1_size[0]
    # pre-allocated training buffers, one row per drawn sample
    scalar_data = np.zeros([trunk, n_scalars]).astype(np.float32)
    cnn1_data = np.zeros(cnn1_size).astype(np.float32)
    cnn2_data = np.zeros(cnn2_size).astype(np.float32)
    y_data = np.zeros([trunk, K]).astype(np.float32)

    # collect the distinct base names present in the directory
    set_filenames = set()
    for filename in os.listdir(strpath):
        set_filenames.add(os.path.splitext(filename)[0])
    main_names = list(set_filenames)
    total_names = len(main_names)
    if total_names == 0:
        # BUGFIX: the original indexed into an empty list (IndexError)
        return []

    curr_swim = 0
    while curr_swim < trunk:
        # randrange gives an exact uniform pick; the original used the
        # fragile int(random.uniform(0, total_names - 0.1)) idiom
        sel = random.randrange(total_names)
        base = os.path.join(strpath, main_names[sel])

        dt_scalar = np.fromfile(base + '.scalar')
        dt_1d = np.fromfile(base + '.1d')
        dt_2d = np.fromfile(base + '.2d')
        dt_train = np.fromfile(base + '.train')

        # A size mismatch skips this draw and re-draws another sample.
        # NOTE(review): as in the original, this loops forever if every
        # sample in the directory is invalid.
        if len(dt_scalar) != n_scalars:
            print('scalar length in file is not equal to cfg')
            continue
        scalar_data[curr_swim] = dt_scalar

        if len(dt_1d) != cnn1_size[1] * cnn1_size[2]:
            print('c1 length in file is not equal to cfg')
            continue
        cnn1_data[curr_swim] = np.reshape(dt_1d, [cnn1_size[1], cnn1_size[2]])

        if len(dt_2d) != cnn2_size[1] * cnn2_size[2] * cnn2_size[3]:
            print('c2 length in file is not equal to cfg')
            continue
        cnn2_data[curr_swim] = np.reshape(
            dt_2d, [cnn2_size[1], cnn2_size[2], cnn2_size[3]])

        if len(dt_train) != K:
            print('K length in file is not equal to cfg')
            continue
        y_data[curr_swim] = dt_train
        curr_swim = curr_swim + 1
    return [scalar_data, cnn1_data, cnn2_data, y_data]


def readnext_run_batch(strpath, n_scalars, cnn1_size, cnn2_size, K, begin, test):
    """
    Read up to `trunk` sample groups sequentially, starting at index `begin`.

    @para strpath    directory containing the sample files
    @para n_scalars  number of scalar features per sample
    @para cnn1_size  [trunk, height, channels] of the 1-D conv input
    @para cnn2_size  [trunk, height, width, channels] of the 2-D conv input
    @para K          output vector dimension
    @para begin      index of the first base name to read
    @para test       when > 0, also load the .train targets into y_data
    @return [scalar_data, cnn1_data, cnn2_data, y_data, nextbg, result_files]
            nextbg is the start index for the next call (0 once exhausted);
            result_files holds one '.result' output path per sample read.
            Returns [] when the two trunk sizes disagree.
    """
    if cnn1_size[0] != cnn2_size[0]:
        return []
    # collect the distinct base names present in the directory
    set_filenames = set()
    for filename in os.listdir(strpath):
        set_filenames.add(os.path.splitext(filename)[0])
    main_names = list(set_filenames)
    total_names = len(main_names)

    trunk = cnn1_size[0]
    # BUGFIX: copy before shrinking — the original aliased the caller's
    # lists and mutated them as a side effect when fewer samples than
    # `trunk` existed in the directory.
    real_cnn1_size = cnn1_size[:]
    real_cnn2_size = cnn2_size[:]
    if trunk > total_names:
        trunk = total_names
        real_cnn1_size[0] = total_names
        real_cnn2_size[0] = total_names

    scalar_data = np.zeros([trunk, n_scalars]).astype(np.float32)
    cnn1_data = np.zeros(real_cnn1_size).astype(np.float32)
    cnn2_data = np.zeros(real_cnn2_size).astype(np.float32)
    y_data = np.zeros([trunk, K]).astype(np.float32)

    curr_swim = 0
    sel = 0
    # one '.result' output path per sample actually read
    list_result_filename = []
    while curr_swim < trunk:
        sel = begin + curr_swim
        if sel >= total_names:
            break
        base = os.path.join(strpath, main_names[sel])
        list_result_filename.append(base + '.result')

        dt_scalar = np.fromfile(base + '.scalar')
        dt_1d = np.fromfile(base + '.1d')
        dt_2d = np.fromfile(base + '.2d')

        # unlike the training reader, bad-sized entries are reported and
        # the corresponding row is left as zeros (no re-draw)
        if len(dt_scalar) != n_scalars:
            print('scalar length in file is not equal to cfg')
        else:
            scalar_data[curr_swim] = dt_scalar

        if len(dt_1d) != cnn1_size[1] * cnn1_size[2]:
            print('c1 length in file is not equal to cfg')
        else:
            cnn1_data[curr_swim] = np.reshape(
                dt_1d, [cnn1_size[1], cnn1_size[2]])

        if len(dt_2d) != cnn2_size[1] * cnn2_size[2] * cnn2_size[3]:
            print('c2 length in file is not equal to cfg')
        else:
            cnn2_data[curr_swim] = np.reshape(
                dt_2d, [cnn2_size[1], cnn2_size[2], cnn2_size[3]])

        if test > 0:
            dt_train = np.fromfile(base + '.train')
            if len(dt_train) != K:
                print('K length in file is not equal to cfg')
            else:
                y_data[curr_swim] = dt_train
        curr_swim = curr_swim + 1

    nextbg = 0
    if sel + 1 < total_names:
        nextbg = sel + 1
    return [scalar_data, cnn1_data, cnn2_data, y_data,
            nextbg, list_result_filename]


def train_model(trainning_task_file, trainning_input_path, model_path, first,
                summary_path="./network"):
    """
    Train the model on randomly drawn batches and checkpoint the weights.

    @para trainning_task_file   cfg file name, see read_config_file
    @para trainning_input_path  training sample directory; each sample is a
                                group of float64 files sharing a base name:
                                .scalar scalar features
                                .1d     1-D conv input, channel-major
                                .2d     2-D conv input, channel-major
                                .train  target vector
    @para model_path            directory the weights checkpoint is saved to
    @para first                 0 = incremental learning (model_path must
                                already hold weights); otherwise fresh start
    @para summary_path          reserved for TensorBoard logs (callback is
                                currently disabled, parameter unused)
    @return dict {"graph": trained model}
            (NOTE(review): the fit history is not returned, contrary to the
            original docstring's claim of a learning curve)
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["scalar_N"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]
    cnn1d = cfObj["cnn1d"]
    cnn2d = cfObj["cnn2d"]

    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0]
    cnn2d_width = cnn2d[1]
    cnn2d_channels = cnn2d[2]

    #create graph
    net_wk = create_graph(n, cnn1d, cnn2d,
                          cfObj["cnn1d_filters"],
                          cfObj["cnn1d_filter_height"],
                          cfObj["cnn1d_pool_rates"],
                          cfObj["cnn2d_filters"],
                          cfObj["cnn2d_filter_height"],
                          cfObj["cnn2d_filter_width"],
                          cfObj["cnn2d_pool_rates_height"],
                          cfObj["cnn2d_pool_rates_width"],
                          K, lamda, ns_array)
    graph = net_wk["graph"]

    if first == 0:
        # incremental learning: continue from previously saved weights
        graph.load_weights(model_path + '/weights.ckpt')

    file_deal_times = cfObj["file_deal_times"]
    trunk = cfObj["trunk"]
    iterate_times = cfObj["iterate_times"]

    for rc in range(file_deal_times):
        [scalar_data, cnn1_data, cnn2_data, y_data] = readnext_train_batch(
            trainning_input_path, n,
            [trunk, cnn1d_height, cnn1d_channels],
            [trunk, cnn2d_height, cnn2d_width, cnn2d_channels], K
        )
        # fit on this batch, then checkpoint so progress survives a crash
        graph.fit([scalar_data, cnn1_data, cnn2_data], y_data,
                  epochs=iterate_times)
        print('Save Model to ' + model_path + '/weights.ckpt')
        graph.save_weights(model_path + '/weights.ckpt')
        sys.stdout.flush()
        sys.stderr.flush()

    return {"graph": graph}


def test_model(trainning_task_file, testing_path, model_path):
    """
    Evaluate a trained model over every sample in a test directory.

    @para trainning_task_file  cfg file name, see read_config_file
    @para testing_path         test sample directory; each sample is a group
                               of float64 files sharing a base name:
                               .scalar / .1d / .2d / .train
    @para model_path           directory the trained weights were saved to
    @return dict {"graph": model}; loss/metrics are printed per batch
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["scalar_N"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]
    cnn1d = cfObj["cnn1d"]
    cnn2d = cfObj["cnn2d"]

    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0]
    cnn2d_width = cnn2d[1]
    cnn2d_channels = cnn2d[2]

    #create graph and restore the trained weights
    net_wk = create_graph(n, cnn1d, cnn2d,
                          cfObj["cnn1d_filters"],
                          cfObj["cnn1d_filter_height"],
                          cfObj["cnn1d_pool_rates"],
                          cfObj["cnn2d_filters"],
                          cfObj["cnn2d_filter_height"],
                          cfObj["cnn2d_filter_width"],
                          cfObj["cnn2d_pool_rates_height"],
                          cfObj["cnn2d_pool_rates_width"],
                          K, lamda, ns_array)
    graph = net_wk["graph"]
    graph.load_weights(model_path + '/weights.ckpt')

    #--------------------------------------------------------------
    trunk = cfObj["trunk"]
    finished = 0
    bg = 0
    while finished == 0:
        [scalar_data, cnn1_data, cnn2_data, y_data, bg, fns] = readnext_run_batch(
            testing_path, n,
            [trunk, cnn1d_height, cnn1d_channels],
            [trunk, cnn2d_height, cnn2d_width, cnn2d_channels], K, bg, 1
        )
        # prints the per-batch loss/metrics
        graph.evaluate([scalar_data, cnn1_data, cnn2_data], y_data)
        sys.stdout.flush()
        sys.stderr.flush()
        # BUGFIX: the original never set `finished`, so this loop ran
        # forever; stop once the reader reports the directory is
        # exhausted (bg == 0), mirroring run_model.
        if bg == 0:
            finished = 1

    return {"graph": graph}


def run_model(trainning_task_file, input_path, model_path, output_file):
    """
    Apply a trained model to every sample in a directory and write one
    float64 '.result' file next to each sample.

    @para trainning_task_file  cfg file name, see read_config_file
    @para input_path           sample directory; each sample is a group of
                               float64 files sharing a base name:
                               .scalar / .1d / .2d
    @para model_path           directory the trained weights were saved to
    @para output_file          NOTE(review): declared but never used by this
                               function (results go to per-sample '.result'
                               files); kept for interface compatibility
    @return dict {"graph": model}
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["scalar_N"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]
    cnn1d = cfObj["cnn1d"]
    cnn2d = cfObj["cnn2d"]

    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0]
    cnn2d_width = cnn2d[1]
    cnn2d_channels = cnn2d[2]

    #create graph and restore the trained weights
    net_wk = create_graph(n, cnn1d, cnn2d,
                          cfObj["cnn1d_filters"],
                          cfObj["cnn1d_filter_height"],
                          cfObj["cnn1d_pool_rates"],
                          cfObj["cnn2d_filters"],
                          cfObj["cnn2d_filter_height"],
                          cfObj["cnn2d_filter_width"],
                          cfObj["cnn2d_pool_rates_height"],
                          cfObj["cnn2d_pool_rates_width"],
                          K, lamda, ns_array)
    graph = net_wk["graph"]
    graph.load_weights(model_path + '/weights.ckpt')

    #--------------------------------------------------------------
    trunk = cfObj["trunk"]
    finished = 0
    bg = 0
    while finished == 0:
        [scalar_data, cnn1_data, cnn2_data, y_data, bg, fns] = readnext_run_batch(
            input_path, n,
            [trunk, cnn1d_height, cnn1d_channels],
            [trunk, cnn2d_height, cnn2d_width, cnn2d_channels], K, bg, 0
        )
        result = graph.predict([scalar_data, cnn1_data, cnn2_data])
        sys.stdout.flush()
        sys.stderr.flush()
        # bg == 0 means the reader exhausted the directory
        if bg == 0:
            finished = 1
        # write one float64 '.result' file per sample in this batch
        for ct, filename in enumerate(fns):
            print(filename)
            result[ct].astype(np.float64).tofile(filename)
    return {"graph": graph}


if __name__ == "__main__":
	print('This File Should Be Called from Main module');
	#train_model("d:\\车牌识别\\蓝牌.qnprj","D:/车牌识别/train","d:/车牌识别/蓝牌/Saved/Model",1,"d:/车牌识别/蓝牌/Saved/summary");
	#test_model("d:\\车牌识别\\蓝牌.qnprj","D:/车牌识别/test","d:/车牌识别/蓝牌/Saved/Model");
	#run_model("d:\\车牌识别\\蓝牌.qnprj","D:/车牌识别/test","d:/车牌识别/蓝牌/Saved/Model","D:/车牌识别/test");
