# -*- coding: utf-8 -*-
import configparser
import re
import tensorflow as tf
import numpy as np
import sys
from tensorflow.keras.callbacks import TensorBoard

def create_graph(n, K, dlambda, ns_hidden):
    """
    Build and compile a fully-connected feed-forward network.

    @para n         number of input features
    @para K         output vector dimension
    @para dlambda   L1 regularization factor (e.g. 0.00001)
    @para ns_hidden list of hidden layer sizes, e.g. [16, 16, 13]
    @return dict {"graph": the compiled Keras model}
    """
    from tensorflow.keras import layers, Sequential, optimizers, losses

    model = Sequential()
    # Hidden layers: tanh activation, L1 penalty on the weights.
    for width in ns_hidden:
        model.add(layers.Dense(width, activation=tf.nn.tanh,
                               kernel_regularizer=tf.keras.regularizers.l1(dlambda)))
    # Linear output layer (regression: no activation).
    model.add(layers.Dense(K, activation=None))

    # Build with a flexible batch dimension; only the feature count is fixed.
    model.build(input_shape=(None, n))

    model.compile(
        optimizer=optimizers.Adam(learning_rate=0.001),  # 'lr' kwarg is deprecated in TF2
        loss=losses.MeanSquaredError(),
        # NOTE(review): 'accuracy' is not a meaningful metric for MSE
        # regression; kept so existing logs/callers are unaffected.
        metrics=['accuracy'])

    model.summary()

    return {"graph": model}

def read_config_file(trainning_task_file):
    """
    Read the training parameters from a cfg (INI) file.

    Returns a dict with the following keys:
       key             meaning                               config path
    1. n               input vector size (feature count)     network/input_nodes
    2. K               output vector size (decision count)   network/output_nodes
    3. dlambda         regularization factor (e.g. 0.001)    network/lambda
    4. ns_array        hidden layer sizes, list of int       network/hidden_layer_size
    5. file_deal_times number of passes over the input file  performance/file_deal_times
    6. trunk           samples per training window           performance/trunk
    7. train_step      window slide step, in samples         performance/train_step
    8. iterate_times   epochs per training window            performance/iterate_times
    """
    config = configparser.ConfigParser()
    config.read(trainning_task_file)
    n = int(config['network']['input_nodes'])       # input vector size
    K = int(config['network']['output_nodes'])      # output vector size
    lamda = float(config['network']['lambda'])

    # Hidden layer sizes: a string like "16,16,13"; whitespace, commas and
    # double quotes all act as separators.  Raw string avoids the
    # invalid-escape warning of the former '[\s,\"]' literal.
    reobj = re.compile(r'[\s,"]')
    ns_array = [int(tok)
                for tok in reobj.split(config['network']['hidden_layer_size'])
                if tok != '']

    file_deal_times = int(config['performance']['file_deal_times'])
    trunk = int(config['performance']['trunk'])
    train_step = int(config['performance']['train_step'])
    iterate_times = int(config['performance']['iterate_times'])

    return {"n": n, "K": K, "dlambda": lamda, "ns_array": ns_array,
            "file_deal_times": file_deal_times, "trunk": trunk,
            "train_step": train_step, "iterate_times": iterate_times}

def train_model(trainning_task_file, trainning_input_file, model_path, first,
                summary_path="./network"):
    """
    Train the model described by a cfg file on a sample file.

    @para trainning_task_file  cfg file name, see read_config_file
    @para trainning_input_file training sample file; one sample per line,
                               comma/whitespace separated: n feature values
                               followed by K target values
    @para model_path           directory where weights are saved (and loaded
                               from when first == 0)
    @para first                first-training flag; == 0 means incremental
                               learning and model_path must already exist
    @para summary_path         TensorBoard log directory
    @return dict {"graph": the trained model}
    """
    # Read the hyper-parameters from the cfg file.
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["n"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]

    # Build the network.
    net_wk = create_graph(n, K, lamda, ns_array)
    graph = net_wk["graph"]

    # Incremental learning: restore the previously saved weights.
    if first == 0:
        graph.load_weights(model_path + '/weights.ckpt')

    file_deal_times = cfObj["file_deal_times"]
    trunk = cfObj["trunk"]
    train_step = cfObj["train_step"]
    iterate_times = cfObj["iterate_times"]

    # Circular sample buffers: trunk rows of n features / K targets.
    x_data = np.zeros([trunk, n]).astype(np.float32)
    y_data = np.zeros([trunk, K]).astype(np.float32)
    total_red = 0  # total number of samples read so far

    # TensorBoard visualization of the training runs.
    tbcbk = TensorBoard(log_dir=summary_path)

    # Tokens are separated by whitespace, commas or double quotes
    # (raw string avoids the invalid-escape warning of '[\s,\"]').
    reobj = re.compile(r'[\s,"]')

    for rc in range(file_deal_times):
        with open(trainning_input_file, 'rt') as ftr:
            while 1:
                lines = ftr.readlines()
                if not lines:
                    # End of file: one last fit() for the tail samples, if any.
                    # NOTE(review): buffer rows not yet overwritten (zeros, or
                    # samples from an earlier window) are still fed to fit()
                    # here and below -- confirm this is intended.
                    if total_red > 0:
                        graph.fit(x_data, y_data, epochs=iterate_times,
                                  callbacks=[tbcbk])
                    break
                for line in lines:
                    # Parse one sample: n features then K targets.
                    x_arr = [tok for tok in reobj.split(line) if tok != '']
                    row = total_red % trunk
                    for idx in range(n):
                        x_data[row, idx] = float(x_arr[idx])
                    for idx in range(K):
                        y_data[row, idx] = float(x_arr[idx + n])
                    total_red += 1
                    # Every train_step samples: fit on the window, checkpoint.
                    if total_red % train_step == 0:
                        print("TRAIN!")
                        graph.fit(x_data, y_data, epochs=iterate_times,
                                  callbacks=[tbcbk])
                        graph.save_weights(model_path + '/weights.ckpt')

    # Final checkpoint.
    graph.save_weights(model_path + '/weights.ckpt')
    return {"graph": graph}
     
def test_model(trainning_task_file, testing_file, model_path):
    """
    Evaluate the convergence of a trained model on a test file.

    @para trainning_task_file cfg file name, see read_config_file
    @para testing_file        test sample file; one sample per line,
                              comma/whitespace separated: n feature values
                              followed by K target values
    @para model_path          directory holding the trained weights
    @return dict {"graph": the evaluated model}
    """
    # Read the hyper-parameters from the cfg file.
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["n"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]

    # Rebuild the network and restore the trained weights.
    net_wk = create_graph(n, K, lamda, ns_array)
    graph = net_wk["graph"]
    graph.load_weights(model_path + '/weights.ckpt')

    trunk = cfObj["trunk"]
    train_step = cfObj["train_step"]

    print("Testing...")
    sys.stdout.flush()

    # Circular sample buffers, same layout as in train_model.
    x_test = np.zeros([trunk, n]).astype(np.float32)
    y_test = np.zeros([trunk, K]).astype(np.float32)
    total_red = 0
    # Raw string avoids the invalid-escape warning of '[\s,\"]'.
    reobj = re.compile(r'[\s,"]')

    with open(testing_file, 'rt') as testfile:
        while 1:
            lines = testfile.readlines()
            if not lines:
                # NOTE(review): a tail of fewer than train_step samples is
                # never passed to evaluate() -- confirm that is acceptable.
                break
            for line in lines:
                # Parse one sample: n features then K targets.
                x_arr = [tok for tok in reobj.split(line) if tok != '']
                row = total_red % trunk
                for idx in range(n):
                    x_test[row, idx] = float(x_arr[idx])
                for idx in range(K):
                    y_test[row, idx] = float(x_arr[idx + n])
                total_red += 1
                # Evaluate once every train_step samples.
                if total_red % train_step == 0:
                    graph.evaluate(x_test, y_test)

    return {"graph": graph}

def run_model(trainning_task_file, input_file, model_path, output_file):
    """
    Apply a trained model to an input file and write the predictions.

    @para trainning_task_file cfg file name, see read_config_file
    @para input_file          input file; one sample per line with a
                              comma/whitespace separated feature vector
    @para model_path          directory holding the trained weights
    @para output_file         output file; one line per sample:
                              [feature vector] [prediction vector]
    @return dict with the last processed batch ("x_test"), its predictions
            ("result") and the model ("graph")
    """
    # Read the hyper-parameters from the cfg file.
    cfObj = read_config_file(trainning_task_file)
    n = cfObj["n"]
    K = cfObj["K"]
    lamda = cfObj["dlambda"]
    ns_array = cfObj["ns_array"]

    # Rebuild the network and restore the trained weights.
    net_wk = create_graph(n, K, lamda, ns_array)
    graph = net_wk["graph"]
    graph.load_weights(model_path + '/weights.ckpt')

    # Raw string avoids the invalid-escape warning of '[\s,\"]'.
    reobj = re.compile(r'[\s,"]')

    # Pre-initialize so the return statement is safe even when input_file is
    # empty (previously raised NameError on x_test/result).
    x_test = np.zeros([0, n]).astype(np.float32)
    result = np.zeros([0, K]).astype(np.float32)

    print("Running...")
    sys.stdout.flush()
    with open(input_file, 'rt') as testfile:
        with open(output_file, 'wt') as resultfile:
            while 1:
                lines = testfile.readlines()
                if not lines:
                    break
                line_count = len(lines)
                x_test = np.zeros([line_count, n]).astype(np.float32)
                for lct in range(line_count):
                    x_arr = [tok for tok in reobj.split(lines[lct]) if tok != '']
                    for idx in range(n):
                        x_test[lct, idx] = float(x_arr[idx])
                # Predict the whole batch, then write one line per sample.
                result = graph.predict(x_test)
                for idx in range(line_count):
                    print(x_test[idx].tolist(), result[idx].tolist(),
                          file=resultfile)
                    sys.stdout.flush()
    return {"x_test": x_test, "result": result, "graph": graph}
   

if __name__ == "__main__":
    # Library module: the functions above are meant to be driven by a
    # separate main module.
    print('This File Should Be Called from Main module')
    # Example invocations, for reference:
    #   create_graph(3, 3, 1e-7, [16, 16, 16])
    #   train_model("D:/fnn_test/train_task.cfg", "D:/fnn_test/train_input.txt", "d:/fnn_test/model", 1, "d:/fnn_test/summary")
    #   test_model("D:/fnn_test/train_task.cfg", "D:/fnn_test/train_input.txt", "d:/fnn_test/model")
    #   run_model("D:/fnn_test/train_task.cfg", "D:/fnn_test/train_input.txt", "d:/fnn_test/model", "D:/fnn_test/result.txt")