# -*- coding: utf-8 -*-
from tensorflow import summary as tfs
import configparser
import tensorflow as tf
import numpy as np
import random
import sys
import re
import os

def create_graph(scalar_N,conv1d_P,conv2d_P,
                 cnn1d_filters,cnn1d_filter_height,cnn1d_pool_rates,
                 cnn2d_filters,cnn2d_filter_height,cnn2d_filter_width,
                 cnn2d_pool_rates_height,cnn2d_pool_rates_width,
                 K,dlambda,ns_hidden):  
    """
    Build the neural-network graph: a convolutional front end (a 1-D
    branch, a 2-D branch and raw scalars) feeding a fully connected
    back end.

    Convolutional part:
    @para scalar_N  number of scalar features, fed straight into the FNN
    @para conv1d_P  1-D convolution input geometry [length, 1, channels]
    @para conv2d_P  2-D convolution input geometry [height, width, channels]
    @para cnn1d_filters, cnn1d_filter_height, cnn1d_pool_rates
                    per-round filter counts, filter heights and pooling
                    rates of the 1-D branch; the length of
                    cnn1d_pool_rates sets the number of conv+pool rounds
    @para cnn2d_*   same for the 2-D branch, with height and width given
                    separately

    Fully connected part (its input width n is computed automatically):
    @para K         output vector dimension
    @para dlambda   L2 regularisation coefficient, e.g. 0.00001
    @para ns_hidden hidden-layer sizes, first to last

    @return dict {"graph": ..., "merged_summary": ...}; individual
            tensors can be fetched with get_tensor_by_name etc.
    """
    ns_array = ns_hidden[:]
    #Output is the last layer, append to last
    ns_array.append(K)
    # NOTE(review): the output layer is passed through tanh like the
    # hidden layers (see the a.append below), so outputs are bounded to
    # (-1, 1) rather than being an unbounded linear combination.
    #ns_array.append(K)
    hidden_layer_size = len(ns_array)
    cnn1d_pool_times = len(cnn1d_pool_rates)
    cnn2d_pool_times = len(cnn2d_pool_rates_height)
    #--------------------------------------------------------------
    #create graph
    graph = tf.Graph()
    with graph.as_default():
        punish = tf.constant(0.0, name='regular')
        with tf.name_scope('cnn1d'):
            in1_vector = tf.placeholder(tf.float32,
                                       [None,conv1d_P[0],conv1d_P[1],
                                        conv1d_P[2]],name="cnn1d_in")
            # run cnn1d_pool_times rounds of convolution + pooling
            data_1d = [in1_vector]
            filters_1d = []
            bais_1d = []
            convres_1d = []
            activat_1d = []
            out1d = []     
            size1d_height = [conv1d_P[0]]
            # one iteration per pooling round
            for idx in range(0,cnn1d_pool_times)    :
                print(data_1d[idx])
                channles = conv1d_P[2]
                if (idx>0):
                    channles = cnn1d_filters[idx-1]
                # new conv-layer filter bank, values in [-1, 1)
                filters_1d.append(tf.Variable(np.random.rand(
                        cnn1d_filter_height[idx],#height
                         1,#1d conv always 1
                         channles, #channels
                         cnn1d_filters[idx])*2-1,#filters
                         dtype=np.float32,
                         name="cnn1d_filter"+str(idx)))
                print(filters_1d[idx])
                # biases; with a non-zero-centred activation, [0, 1) would suit better
                bais_1d.append(tf.Variable(tf.random_uniform(
                        [cnn1d_filters[idx]],-1,1),
                        name='cnn1_b'+str(idx)))
                # accumulate the L2 regularisation penalty
                punish = punish + tf.reduce_sum(filters_1d[idx]**2)* dlambda
                # convolution
                convres_1d.append(tf.nn.conv2d(data_1d[idx],#input
                                               filters_1d[idx],#filters
                                               strides=[1,1,1,1],
                                               padding="SAME")+bais_1d[idx])
                print(convres_1d[idx])
                # activation
                activat_1d.append(tf.nn.tanh(convres_1d[idx]))
                # pooling
                data_1d.append(tf.nn.avg_pool(activat_1d[idx],
                                                 [1,cnn1d_pool_rates[idx],1,1],
                                                 strides=
                                                 [1,cnn1d_pool_rates[idx],1,1],
                                                 padding="SAME"
                                                 ))
                # size after pooling: ceil division to match SAME padding
                if (size1d_height[idx]%cnn1d_pool_rates[idx]>0):
                    addition_1d = 1;
                else:
                    addition_1d = 0;
                size1d_height.append(int(size1d_height[idx]/cnn1d_pool_rates[idx])
                    +addition_1d)
                out1d = data_1d[idx+1]
                length_1d = size1d_height[idx+1]
                chan_1d = cnn1d_filters[idx];
                tfs.histogram('cnn1d_filter'+str(idx),filters_1d[idx])
            print(out1d)
                
        with tf.name_scope('cnn2d'):
            in2_vector = tf.placeholder(tf.float32,
                                   [None,conv2d_P[0],conv2d_P[1],conv2d_P[2]],
                                    name="cnn2d_in")
            # run cnn2d_pool_times rounds of convolution + pooling
            data_2d = [in2_vector]
            filters_2d = []
            convres_2d = []
            bais_2d = []
            activat_2d = []
            out2d = []
            size2d_size = [[conv2d_P[0]],[conv2d_P[1]]]
            for idx in range(0,cnn2d_pool_times)    :
                print(data_2d[idx])
                channles = conv2d_P[2]
                if (idx>0):
                    channles = cnn2d_filters[idx-1]
                # new conv-layer filter bank, values in [-1, 1)
                filters_2d.append(tf.Variable(np.random.rand(
                        cnn2d_filter_height[idx],  #height
                        cnn2d_filter_width[idx],#width
                         channles, #channels
                         cnn2d_filters[idx])*2-1,#filters
                         dtype=np.float32,
                         name="cnn2d_filter"+str(idx))
                )
                print(filters_2d[idx])
                # accumulate the L2 regularisation penalty
                punish = punish + tf.reduce_sum(filters_2d[idx]**2)* dlambda
                # biases; with a non-zero-centred activation, [0, 1) would suit better
                bais_2d.append(tf.Variable(tf.random_uniform([
                        cnn2d_filters[idx]],-1,1),
                     name='cnn2_b'+str(idx)))
                # convolution
                convres_2d.append(tf.nn.conv2d(data_2d[idx],#input
                                               filters_2d[idx],#filters
                                               strides=[1,1,1,1],
                                               padding="SAME")+bais_2d[idx])
                # activation
                activat_2d.append(tf.nn.tanh(convres_2d[idx]))
                # pooling
                data_2d.append(tf.nn.max_pool(activat_2d[idx],
                                                 [1,
                                                 cnn2d_pool_rates_height[idx],
                                                 cnn2d_pool_rates_width[idx],
                                                  1],strides=[1,
                                                 cnn2d_pool_rates_height[idx],
                                                 cnn2d_pool_rates_width[idx],
                                                 1],padding="SAME"
                                                 ))
                # size after pooling: ceil division per axis to match SAME padding
                if (size2d_size[0][idx]%cnn2d_pool_rates_height[idx]):
                    h_addseed = 1;
                else:             
                    h_addseed = 0;
                size2d_size[0].append(int(size2d_size[0][idx]
                                        /cnn2d_pool_rates_height[idx])
                    +h_addseed)
                    
                if (size2d_size[1][idx]%cnn2d_pool_rates_width[idx]):
                    w_addseed = 1;
                else:             
                    w_addseed = 0;

                size2d_size[1].append(int(size2d_size[1][idx]
                                        /cnn2d_pool_rates_width[idx])
                    +w_addseed)
                print(convres_2d[idx])
                out2d = data_2d[idx+1]
                length_2d = [size2d_size[0][idx+1],size2d_size[1][idx+1]]
                chan_2d = cnn2d_filters[idx];
                tfs.histogram('cnn2d_filter'+str(idx),filters_2d[idx])
            print(out2d)
        # gather the three feature groups and flatten them into one vector
        # NOTE(review): length_1d/chan_1d and length_2d/chan_2d are only
        # bound if each branch ran at least one pooling round -- confirm
        # configs always supply non-empty pool-rate lists.
        with tf.name_scope('feature_gather'):
            print (length_1d,chan_1d,length_2d,chan_2d)
            reshape1d = tf.reshape(out1d, [tf.shape(out1d)[0], -1])
            reshape2d = tf.reshape(out2d, [tf.shape(out2d)[0], -1])
            scalar_input = tf.placeholder(tf.float32,[None,scalar_N],
                                          name="scalar_input")
            fullinput = tf.concat([scalar_input ,reshape1d, reshape2d], 1,
                                  name="full_input")
            # input width of the fully connected network
            n = scalar_N+length_1d*chan_1d  + length_2d[0]*length_2d[1]*chan_2d
            print (n)
        with tf.name_scope('fnn'):
            # classic fully connected network
            s = [n]
            a = [fullinput]
            W = []
            b = []
            z = []            
            for idx in range(0,hidden_layer_size)    :
                s.append(int(ns_array[idx]))
                # weights; with a non-zero-centred activation, [0, 1) would suit better
                W.append(tf.Variable(tf.random_uniform([s[idx],s[idx+1]],-1,1),
                                     name='W'+str(idx+1)))
                # biases; with a non-zero-centred activation, [0, 1) would suit better
                b.append(tf.Variable(tf.random_uniform([1,s[idx+1]],-1,1),
                                     name='b'+str(idx+1)))
                z.append(tf.add(tf.matmul(a[idx],W[idx]) , b[idx],
                                name='z'+str(idx+1)))
                if (idx < hidden_layer_size - 1):
                    a_name = 'a'+str(idx+1)
                else:
                    a_name = 'output'
                a.append(tf.nn.tanh(z[idx],name=a_name))
               
                punish = punish + tf.reduce_sum(W[idx]**2) * dlambda
                
                tfs.histogram('W'+str(idx+1),W[idx])
                tfs.histogram('b'+str(idx+1),b[idx])
                tfs.histogram('a'+str(idx+1),a[idx+1])
        #--------------------------------------------------------------
        with tf.name_scope('loss'):
            y_ = tf.placeholder(tf.float32,[None,K],name="tr_out")
            pure_loss = tf.reduce_mean(tf.square(a[hidden_layer_size]-y_),
                                       name="pure_loss")
            loss = tf.add(pure_loss, punish, name="loss")
            tfs.scalar('loss',loss)
            tfs.scalar('punish',punish)
            tfs.scalar('pure_loss',pure_loss)
        with tf.name_scope('train'):        
            optimizer = tf.train.AdamOptimizer(name="optimizer")
            optimizer.minimize(loss,name="train")  
            # counter of how many samples the network has been trained on
            train_times = tf.Variable(tf.zeros([1,1]), name='train_times')            
            train_trunk = tf.placeholder(tf.float32,[None,1],
                                         name="train_trunk")
            tf.assign(train_times , train_trunk + train_times,
                      name="train_times_add")            
        merged_summary = tfs.merge_all() 
        sys.stdout.flush()
    return {"graph":graph,"merged_summary":merged_summary}

def stringlist2array(string):
    """
    Parse a delimited list of integers, e.g. '16,16,13' or '"1 2" 3'.

    Tokens may be separated by any mix of whitespace, commas and double
    quotes; empty tokens are ignored.

    @para string  the raw value read from the config file
    @return list of ints (empty list for an empty/blank string)
    """
    # fix: use a raw string for the regex ('[\s,"]' relies on the
    # deprecated unrecognised-escape behaviour); '+' collapses runs of
    # delimiters, and the comprehension drops the empty edge tokens
    return [int(token) for token in re.split(r'[\s,"]+', string) if token]

def read_config_file(trainning_task_file)    :
    """
    Read the training parameters from a config file.

    Returns a dict whose main entries are:
       key                 meaning                        config path
    1. scalar_N            number of scalar features      fnn/scalar_n
    2. K                   output vector dimension        fnn/output_nodes
    3. dlambda             regularisation coefficient     fnn/lambda
    4. ns_array            hidden-layer sizes (list)      fnn/hidden_layer_size
    5. file_deal_times     number of training batches     performance/file_deal_times
    6. trunk               samples per batch              performance/trunk
    7. iterate_times       iterations per batch           performance/iterate_times
    plus the cnn1d/cnn2d geometry entries (heights, widths, channels,
    filter and pool-rate lists) read from the [cnn1d]/[cnn2d] sections.
    """
    #reading config file
    config = configparser.ConfigParser()
    config.read(trainning_task_file)
    scalar_n  = int(config['fnn']['scalar_n'])     # input vector size
    K       = int(config['fnn']['output_nodes'])     # output vector size
    lamda           = float(config['fnn']['lambda'])
    # hidden layer sizes: a string split on ",", like "16,16,13"
    ns_array = stringlist2array(config['fnn']['hidden_layer_size'] );
  

    file_deal_times = int(config['performance']['file_deal_times'])
    trunk           = int(config['performance']['trunk'])
    iterate_times   = int(config['performance']['iterate_times'])
    

    cnn1d_height = int(config['cnn1d']['height'])
    cnn1d_channels = int(config['cnn1d']['channels'])
    cnn1d_filters = stringlist2array(config['cnn1d']['filters'])
    cnn1d_filter_height = stringlist2array(config['cnn1d']['filter_height'])
    cnn1d_pool_rates = stringlist2array(config['cnn1d']['pool_rates'])
    
    cnn2d_height = int(config['cnn2d']['height'])
    cnn2d_width = int(config['cnn2d']['width'])
    cnn2d_channels = int(config['cnn2d']['channels'])
    cnn2d_filters = stringlist2array(config['cnn2d']['filters'])
    cnn2d_filter_height = stringlist2array(config['cnn2d']['filter_height'])
    cnn2d_filter_width = stringlist2array(config['cnn2d']['filter_width'])
    cnn2d_pool_rates_height = stringlist2array(config['cnn2d']
                                ['pool_rates_height'])
    cnn2d_pool_rates_width = stringlist2array(config['cnn2d']
                                ['pool_rates_width'])
    # fix: message typo ("red" -> "read")
    print("Config file read from disk ok.")
    sys.stdout.flush()

    return {"scalar_N":scalar_n,
            "cnn1d":[cnn1d_height,1,cnn1d_channels],
            "cnn2d":[cnn2d_height,cnn2d_width,cnn2d_channels], 
            "cnn1d_filters":cnn1d_filters,
            "cnn1d_filter_height":cnn1d_filter_height,
            "cnn1d_pool_rates":cnn1d_pool_rates,
            "cnn2d_filters":cnn2d_filters,
            "cnn2d_filter_height":cnn2d_filter_height,
            "cnn2d_filter_width":cnn2d_filter_width,
            "cnn2d_pool_rates_height":cnn2d_pool_rates_height,
            "cnn2d_pool_rates_width":cnn2d_pool_rates_width,
            "K":K,"dlambda":lamda,"ns_array":ns_array,
            "file_deal_times":file_deal_times,"trunk":trunk,
            "iterate_times":iterate_times}

def init_graph (graph,sess):
    """
    Initialise all variables of *graph* inside the session *sess*.

    The graph is the static structure; the session is the TensorFlow
    context that actually executes operations on it.
    """
    with graph.as_default():
        # must run before any training or inference step
        initializer = tf.global_variables_initializer()
        sess.run(initializer)

def readnext_train_batch(strpath,n_scalars,cnn1_size,cnn2_size,K):
    """
    Randomly pick cnn1_size[0] sample groups from the given directory.

    @para strpath    directory holding the sample files; each sample is a
                     group of files sharing a base name with extensions
                     .scalar/.1d/.2d/.train
    @para n_scalars  number of scalar features per sample
    @para cnn1_size  [trunk, height, 1, channels] of the 1-D conv input
    @para cnn2_size  [trunk, height, width, channels] of the 2-D conv input
    @para K          output vector dimension
    @return [scalar_data, cnn1_data, cnn2_data, y_data] as float32
            arrays, or [] if the two trunk sizes disagree
    """
    if (cnn1_size[0]!=cnn2_size[0]):
        return [];
    trunk = cnn1_size[0];
    #trainning
    scalar_data = np.zeros([trunk,n_scalars]).astype(np.float32)
    cnn1_data   = np.zeros(cnn1_size).astype(np.float32)
    cnn2_data   = np.zeros(cnn2_size).astype(np.float32)
    #read n features and K outputs
    y_data = np.zeros([trunk,K]).astype(np.float32)
    
    # collect every distinct base file name in the directory
    set_filenames = set([]);
    lst_files = os.listdir(strpath)
    for filename in lst_files:
        main_name = os.path.splitext(filename);
        set_filenames.update([main_name[0]]);
    
    main_names = list(set_filenames)
    total_names = len(main_names)
    
    curr_swim = 0;
    # NOTE(review): a sample whose sizes disagree with the config is
    # skipped without advancing curr_swim, so a directory containing only
    # invalid samples loops forever -- confirm upstream guarantees data.
    while (curr_swim < trunk):
        # pick a random sample index in [0, total_names)
        sel = int(random.uniform(0,total_names-0.1))
        # the four file names of the selected sample
        fn_scalar = strpath + '/' + main_names[sel] + '.scalar';
        fn_1d = strpath + '/' + main_names[sel] + '.1d';
        fn_2d = strpath + '/' + main_names[sel] + '.2d';
        fn_train = strpath + '/' + main_names[sel] + '.train';
        
        dt_scalar = np.fromfile(fn_scalar);
        dt_1d = np.fromfile(fn_1d);
        dt_2d = np.fromfile(fn_2d);
        dt_train = np.fromfile(fn_train);
        
        if (len(dt_scalar)!=n_scalars):
            print('scalar length in file is not equal to cfg')
            continue;
        else:
            scalar_data[curr_swim] = dt_scalar;
            
        if (len(dt_1d)!=cnn1_size[1]*cnn1_size[2]*cnn1_size[3]):
            print('c1 length in file is not equal to cfg')
            continue;
        else:
            cnn1_data[curr_swim] = np.reshape(dt_1d,[cnn1_size[1],
                                           cnn1_size[2],cnn1_size[3]]
                );
        if (len(dt_2d)!=cnn2_size[1]*cnn2_size[2]*cnn2_size[3]):
            print('c2 length in file is not equal to cfg')
            continue;
        else:
            cnn2_data[curr_swim] = np.reshape(dt_2d,[cnn2_size[1],
                                           cnn2_size[2],cnn2_size[3]]
                );
        if (len(dt_train)!=K):
            print('K length in file is not equal to cfg')
            continue;
        else:
            y_data[curr_swim] = dt_train
        curr_swim = curr_swim + 1
    return [scalar_data,cnn1_data,cnn2_data,y_data]

def readnext_run_batch(strpath,n_scalars,cnn1_size,cnn2_size,K,begin,test):
    """
    Read a sequential batch of sample groups from *strpath*, starting at
    index *begin* of the (de-duplicated) file-name list.

    @para strpath    directory holding the sample files
    @para n_scalars  number of scalar features per sample
    @para cnn1_size  [trunk, height, 1, channels] of the 1-D conv input
    @para cnn2_size  [trunk, height, width, channels] of the 2-D conv input
    @para K          output vector dimension
    @para begin      index of the first sample to read
    @para test       >0 to also load the .train ideal outputs into y_data
    @return [scalar_data, cnn1_data, cnn2_data, y_data, nextbg,
             list_result_filename]; nextbg is the start index for the
             next call (0 once the directory is exhausted), and the last
             entry lists the .result output paths for this batch.
            Returns [] if the two trunk sizes disagree.
    """
    if (cnn1_size[0]!=cnn2_size[0]):
        return [];
    # collect every distinct base file name in the directory
    set_filenames = set([]);
    lst_files = os.listdir(strpath)
    for filename in lst_files:
        main_name = os.path.splitext(filename);
        set_filenames.update([main_name[0]]);
    
    main_names = list(set_filenames)
    total_names = len(main_names)

    trunk = cnn1_size[0]
    # fix: copy before shrinking -- plain assignment aliased the caller's
    # lists, so writing real_cnn*_size[0] below mutated the caller's
    # cnn1_size/cnn2_size arguments
    real_cnn1_size = list(cnn1_size)
    real_cnn2_size = list(cnn2_size)
    if (trunk > total_names):
        trunk = total_names
        real_cnn1_size[0] = total_names;
        real_cnn2_size[0] = total_names;
    
    scalar_data = np.zeros([trunk,n_scalars]).astype(np.float32)
    cnn1_data   = np.zeros(real_cnn1_size).astype(np.float32)
    cnn2_data   = np.zeros(real_cnn2_size).astype(np.float32)
    #read n features and K outputs
    y_data = np.zeros([trunk,K]).astype(np.float32)


    
    curr_swim = 0;
    sel = 0;
    
    # remember the .result file name for each sample in the batch
    list_result_filename = [];
    while (curr_swim < trunk):
        sel = begin + curr_swim
        if (sel >= total_names):
            break;
        # the four file names of the current sample
        fn_scalar = strpath + '/' + main_names[sel] + '.scalar';
        fn_1d = strpath + '/' + main_names[sel] + '.1d';
        fn_2d = strpath + '/' + main_names[sel] + '.2d';
        fn_train = strpath + '/' + main_names[sel] + '.train';
        
        list_result_filename.append( strpath + '/' + main_names[sel] + '.result');
        
        dt_scalar = np.fromfile(fn_scalar);
        dt_1d = np.fromfile(fn_1d);
        dt_2d = np.fromfile(fn_2d);
        
        if (len(dt_scalar)!=n_scalars):
            print('scalar length in file is not equal to cfg')
        else:
            scalar_data[curr_swim] = dt_scalar;
            
        if (len(dt_1d)!=cnn1_size[1]*cnn1_size[2]*cnn1_size[3]):
            print('c1 length in file is not equal to cfg')
        else:
            cnn1_data[curr_swim] = np.reshape(dt_1d,[cnn1_size[1],
                                           cnn1_size[2],cnn1_size[3]]
                );
        if (len(dt_2d)!=cnn2_size[1]*cnn2_size[2]*cnn2_size[3]):
            print('c2 length in file is not equal to cfg')
        else:
            cnn2_data[curr_swim] = np.reshape(dt_2d,[cnn2_size[1],
                                           cnn2_size[2],cnn2_size[3]]
                );
        if (test>0):
            dt_train = np.fromfile(fn_train);
            if (len(dt_train)!=K):
                print('K length in file is not equal to cfg')
            else:
                y_data[curr_swim] = dt_train
        curr_swim = curr_swim + 1
    
    nextbg = 0
    if (sel + 1< total_names):
        nextbg = sel + 1
    return [scalar_data,cnn1_data,cnn2_data,y_data,nextbg,list_result_filename]

def train_model(trainning_task_file,trainning_input_path,model_path,first,
                summary_path = "./network"):
    """
    Train the model.

    @para trainning_task_file  cfg file name, see read_config_file
    @para trainning_input_path directory of training samples; each sample
                                is a group of 4 files sharing a base name
                                (numbered from 000000000 upwards):
                                .scalar: scalar features, double
                                .1d: 1-D conv input, channels contiguous, double
                                .2d: 2-D conv input, channels contiguous, double
                                .train: ideal output, double
    @para model_path           directory where checkpoints are saved
    @para first                whether this is the first training run;
                               =0 means incremental learning, and
                               model_path must already hold a checkpoint
    @para summary_path         directory for TensorBoard summaries
    @return the learning curve (x_plot/y_plot) and the graph
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n               = cfObj["scalar_N"]
    K               = cfObj["K"]
    lamda           = cfObj["dlambda"]
    ns_array        = cfObj["ns_array"]
    cnn1d           = cfObj["cnn1d"]
    cnn2d           = cfObj["cnn2d"]

    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0];
    cnn2d_width = cnn2d[1];
    cnn2d_channels = cnn2d[2];

    cnn1d_filters = cfObj["cnn1d_filters"];
    cnn1d_filter_height = cfObj["cnn1d_filter_height"]
    cnn1d_pool_rates = cfObj["cnn1d_pool_rates"]
    cnn2d_filters = cfObj["cnn2d_filters"]
    cnn2d_filter_height = cfObj["cnn2d_filter_height"]
    cnn2d_filter_width = cfObj["cnn2d_filter_width"]
    cnn2d_pool_rates_height = cfObj["cnn2d_pool_rates_height"]
    cnn2d_pool_rates_width = cfObj["cnn2d_pool_rates_width"]
    #create graph
    net_wk = create_graph(n,cnn1d,cnn2d,
                          cnn1d_filters,cnn1d_filter_height,cnn1d_pool_rates,
                          cnn2d_filters,cnn2d_filter_height,cnn2d_filter_width,
                          cnn2d_pool_rates_height,   cnn2d_pool_rates_width,                       
                          K,lamda,ns_array)
    graph = net_wk["graph"]
    merged_summary = net_wk["merged_summary"]
    with graph.as_default():
        #save graph to Disk
        saver = tf.train.Saver()
        loss = graph.get_tensor_by_name("loss/loss:0")
        train = graph.get_operation_by_name("train/train")
        
    ### create tensorflow structure end ###
    sess = tf.Session(graph=graph)
    if (first != 0):
        init_graph(graph,sess)
    else:
        check_point_path = model_path # directory holding the saved model
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=check_point_path)
        saver.restore(sess,ckpt.model_checkpoint_path)
            
    #writer = tf.summary.FileWriter("./netdemo/")
    #writer.add_graph(sess.graph)
    #writer.close();
    file_deal_times = cfObj["file_deal_times"]
    trunk           = cfObj["trunk"]
    iterate_times   = cfObj["iterate_times"]
 
    # fetch the input placeholders
    sc_tensor = graph.get_tensor_by_name("feature_gather/scalar_input:0");
    c1_tensor = graph.get_tensor_by_name("cnn1d/cnn1d_in:0");
    c2_tensor = graph.get_tensor_by_name("cnn2d/cnn2d_in:0");
    tout = graph.get_tensor_by_name("loss/tr_out:0");
    
    # training-progress counters
    train_trunk = graph.get_tensor_by_name("train/train_trunk:0");
    train_times_add = graph.get_tensor_by_name("train/train_times_add:0")
           
    
    writer = tf.summary.FileWriter(summary_path)
    writer.add_graph(graph) 
    
    trk = np.ones([1,1]) * iterate_times;
    x_plot = []
    y_plot = []

    for rc in range(file_deal_times):
        [scalar_data,cnn1_data,cnn2_data,y_data] = readnext_train_batch(
                trainning_input_path,n,
                [trunk,cnn1d_height,1,cnn1d_channels],
                [trunk,cnn2d_height,cnn2d_width,cnn2d_channels],K
                )
        #trainning
        for step in range(iterate_times):
            sess.run(train,feed_dict=
                     {sc_tensor:scalar_data,
                      c1_tensor:cnn1_data,
                      c2_tensor:cnn2_data,
                      tout:y_data})
        # write summaries for TensorBoard
        t_count = sess.run(train_times_add,
                           feed_dict={train_trunk:trk})
        s = sess.run(merged_summary,
                     feed_dict={sc_tensor:scalar_data,
                      c1_tensor:cnn1_data,
                      c2_tensor:cnn2_data,
                      tout:y_data})
        writer.add_summary(s,t_count)
        #print loss
        lss = sess.run(loss,feed_dict=
                       {sc_tensor:scalar_data,
                       c1_tensor:cnn1_data,
                       c2_tensor:cnn2_data,
                       tout:y_data})
        print(rc,t_count,lss)
        x_plot.append(t_count/1000);
        y_plot.append(lss)
        sys.stdout.flush()
        saver.save(sess,model_path+'/model.ckpt')
            
    #saving
    # final checkpoint save
    saver.save(sess,model_path+'/model.ckpt')
    #mp.plot(x_plot,y_plot,'b')
    return {"x_plot":x_plot,"y_plot":y_plot,"graph":graph};
     
def test_model(trainning_task_file,testing_path,model_path):
    """
    Evaluate a trained model's convergence on a test set.

    @para trainning_task_file  cfg file name, see read_config_file
    @para testing_path         directory of test samples; each sample is
                                a group of 4 files sharing a base name
                                (numbered from 000000000 upwards):
                                .scalar: scalar features, double
                                .1d: 1-D conv input, channels contiguous, double
                                .2d: 2-D conv input, channels contiguous, double
                                .train: ideal output, double
    @para model_path           directory holding the saved checkpoints
    @return the loss curve over the batches, plus the graph, the
            .result file names and the last batch's network output
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n               = cfObj["scalar_N"]
    K               = cfObj["K"]
    lamda           = cfObj["dlambda"]
    ns_array        = cfObj["ns_array"]
    cnn1d           = cfObj["cnn1d"]
    cnn2d           = cfObj["cnn2d"]
                     
    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0];
    cnn2d_width = cnn2d[1];
    cnn2d_channels = cnn2d[2];

    cnn1d_filters = cfObj["cnn1d_filters"];
    cnn1d_filter_height = cfObj["cnn1d_filter_height"]
    cnn1d_pool_rates = cfObj["cnn1d_pool_rates"]
    cnn2d_filters = cfObj["cnn2d_filters"]
    cnn2d_filter_height = cfObj["cnn2d_filter_height"]
    cnn2d_filter_width = cfObj["cnn2d_filter_width"]
    cnn2d_pool_rates_height = cfObj["cnn2d_pool_rates_height"]
    cnn2d_pool_rates_width = cfObj["cnn2d_pool_rates_width"]
    #create graph
    net_wk = create_graph(n,cnn1d,cnn2d,
                          cnn1d_filters,cnn1d_filter_height,cnn1d_pool_rates,
                          cnn2d_filters,cnn2d_filter_height,cnn2d_filter_width,
                          cnn2d_pool_rates_height,   cnn2d_pool_rates_width,                       
                          K,lamda,ns_array)

    #create graph
    graph = net_wk["graph"]
    #merged_summary = net_wk["merged_summary"]
    with graph.as_default():
        #save graph to Disk
        saver = tf.train.Saver()
        loss = graph.get_tensor_by_name("loss/loss:0")
        
    ### create tensorflow structure end ###
    sess = tf.Session(graph=graph)
    check_point_path = model_path # directory holding the saved model
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir=check_point_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
    
    #--------------------------------------------------------------
    trunk           = cfObj["trunk"]
    # fetch the input/output placeholders and tensors
    sc_tensor = graph.get_tensor_by_name("feature_gather/scalar_input:0");
    c1_tensor = graph.get_tensor_by_name("cnn1d/cnn1d_in:0");
    c2_tensor = graph.get_tensor_by_name("cnn2d/cnn2d_in:0");
    tout = graph.get_tensor_by_name("loss/tr_out:0");
    outv = graph.get_tensor_by_name("fnn/output:0");
    x_plot = []
    y_plot = []

    finished = 0
    bg = 0
    while(finished == 0):
        [scalar_data,cnn1_data,cnn2_data,y_data,bg,fns] = readnext_run_batch(
                testing_path,n,
                [trunk,cnn1d_height,1,cnn1d_channels],
                [trunk,cnn2d_height,cnn2d_width,cnn2d_channels],K,bg,1
                )
        #print loss
        lss = sess.run(loss,feed_dict=
                       {sc_tensor:scalar_data,
                       c1_tensor:cnn1_data,
                       c2_tensor:cnn2_data,
                       tout:y_data})
        result = sess.run(outv,feed_dict=
                       {sc_tensor:scalar_data,
                       c1_tensor:cnn1_data,
                       c2_tensor:cnn2_data,
                       tout:y_data})        
        print(bg,lss)
        x_plot.append(bg);
        y_plot.append(lss)
        sys.stdout.flush()
        if (bg==0):
            finished = 1
    #mp.plot(x_plot,y_plot,'b')
    return {"x_plot":x_plot,"y_plot":y_plot,"graph":graph,"filenames":fns,
            "result":result};    

def run_model(trainning_task_file,input_path,model_path,output_file):
    """
    Apply a trained model to unlabelled samples and write per-sample
    .result files.

    @para trainning_task_file  cfg file name, see read_config_file
    @para input_path           directory of samples; each sample is a
                                group of 3 files sharing a base name
                                (numbered from 000000000 upwards):
                                .scalar: scalar features, double
                                .1d: 1-D conv input, channels contiguous, double
                                .2d: 2-D conv input, channels contiguous, double
    @para model_path           directory holding the saved checkpoints
    @para output_file          output file name (currently unused; the
                               results are written per sample to
                               <base>.result as float64 binary instead)
    @return dict with the last batch's result, the graph and the
            .result file names
    """
    #reading config file
    cfObj = read_config_file(trainning_task_file)
    n               = cfObj["scalar_N"]
    K               = cfObj["K"]
    lamda           = cfObj["dlambda"]
    ns_array        = cfObj["ns_array"]
    cnn1d           = cfObj["cnn1d"]
    cnn2d           = cfObj["cnn2d"]
                     
    cnn1d_height = cnn1d[0]
    cnn1d_channels = cnn1d[2]
    cnn2d_height = cnn2d[0];
    cnn2d_width = cnn2d[1];
    cnn2d_channels = cnn2d[2];
    #create graph
    cnn1d_filters = cfObj["cnn1d_filters"];
    cnn1d_filter_height = cfObj["cnn1d_filter_height"]
    cnn1d_pool_rates = cfObj["cnn1d_pool_rates"]
    cnn2d_filters = cfObj["cnn2d_filters"]
    cnn2d_filter_height = cfObj["cnn2d_filter_height"]
    cnn2d_filter_width = cfObj["cnn2d_filter_width"]
    cnn2d_pool_rates_height = cfObj["cnn2d_pool_rates_height"]
    cnn2d_pool_rates_width = cfObj["cnn2d_pool_rates_width"]
    #create graph
    net_wk = create_graph(n,cnn1d,cnn2d,
                          cnn1d_filters,cnn1d_filter_height,cnn1d_pool_rates,
                          cnn2d_filters,cnn2d_filter_height,cnn2d_filter_width,
                          cnn2d_pool_rates_height,   cnn2d_pool_rates_width,                       
                          K,lamda,ns_array)
    #create graph
    graph = net_wk["graph"]
    #merged_summary = net_wk["merged_summary"]
    with graph.as_default():
        #save graph to Disk
        saver = tf.train.Saver()       
        
    ### create tensorflow structure end ###
    sess = tf.Session(graph=graph)
    check_point_path = model_path # directory holding the saved model
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir=check_point_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
    
    #--------------------------------------------------------------
    trunk           = cfObj["trunk"]
 
    # fetch the input/output placeholders and tensors
    sc_tensor = graph.get_tensor_by_name("feature_gather/scalar_input:0");
    c1_tensor = graph.get_tensor_by_name("cnn1d/cnn1d_in:0");
    c2_tensor = graph.get_tensor_by_name("cnn2d/cnn2d_in:0");
    tout = graph.get_tensor_by_name("loss/tr_out:0");
    outv = graph.get_tensor_by_name("fnn/output:0");

    finished = 0
    bg = 0
    # NOTE(review): if input_path yields no batches, `result` and `fns`
    # below are referenced before assignment -- confirm callers always
    # provide a non-empty directory.
    while(finished == 0):
        [scalar_data,cnn1_data,cnn2_data,y_data,bg,fns] = readnext_run_batch(
                input_path,n,
                [trunk,cnn1d_height,1,cnn1d_channels],
                [trunk,cnn2d_height,cnn2d_width,cnn2d_channels],K,bg,0
                )
        # run the network on this batch; tout is fed the zero-filled
        # y_data here -- the output tensor does not depend on it
        result = sess.run(outv,feed_dict=
                       {sc_tensor:scalar_data,
                       c1_tensor:cnn1_data,
                       c2_tensor:cnn2_data,
                       tout:y_data})    
        ct = 0
        # write each sample's output vector to its .result file (float64)
        for filename in fns:
            resa = result[ct].astype(np.float64)
            resa.tofile(filename)
            ct = ct + 1
        if (bg==0):
            finished = 1
    #mp.plot(x_plot,y_plot,'b')
    return {"result":result,"graph":graph,"filenames":fns};    
   
  
   
def dump_all(trainning_task_file,model_path):
    """
    Extract all trained matrices (weights, biases, counters) from a
    saved model.

    @para trainning_task_file  cfg file name, see read_config_file
    @para model_path           directory holding the saved checkpoints
    @return the config dict augmented with "W", "b", "z", "punish",
            "relation" and "train_times" entries
    """
    #reading config file    
    cfObj = read_config_file(trainning_task_file)
    # fix: read_config_file stores the scalar count under "scalar_N";
    # there is no "n" key, so the old lookup raised KeyError
    n               = cfObj["scalar_N"]
    K               = cfObj["K"]
    lamda           = cfObj["dlambda"]
    ns_array        = cfObj["ns_array"]
    # fix: create_graph requires the full CNN parameter set; call it the
    # same way train_model/test_model/run_model do
    net_wk = create_graph(n,cfObj["cnn1d"],cfObj["cnn2d"],
                          cfObj["cnn1d_filters"],
                          cfObj["cnn1d_filter_height"],
                          cfObj["cnn1d_pool_rates"],
                          cfObj["cnn2d_filters"],
                          cfObj["cnn2d_filter_height"],
                          cfObj["cnn2d_filter_width"],
                          cfObj["cnn2d_pool_rates_height"],
                          cfObj["cnn2d_pool_rates_width"],
                          K,lamda,ns_array)
    graph = net_wk["graph"]
    #merged_summary = net_wk["merged_summary"]
    with graph.as_default():
        saver = tf.train.Saver()
        
    ### create tensorflow structure end ###
    sess = tf.Session(graph=graph)
    check_point_path = model_path # directory holding the saved model
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir=check_point_path)
    saver.restore(sess,ckpt.model_checkpoint_path)

    #Output is the last layer, append to last
    ns_array        = cfObj["ns_array"]
    ns_array.append(K)
    hidden_layer_size = len(ns_array)
    #--------------------------------------------------------------
    W = []
    b = []
    z = []
    # pull every layer's weight and bias values out of the session
    for idx in range(0,hidden_layer_size)    :
        sess_v = sess.graph.get_tensor_by_name("fnn/"+'W'+str(idx+1)+":0");
        W.append(sess.run(sess_v))
        sess_v = sess.graph.get_tensor_by_name("fnn/"+'b'+str(idx+1)+":0");
        b.append(sess.run(sess_v))
    # NOTE(review): create_graph makes its 'regular' constant outside any
    # name scope (tensor name "regular:0"), so "fnn/regular:0" may not
    # resolve -- confirm against the actual graph before relying on it.
    sess_v = sess.graph.get_tensor_by_name("fnn/regular:0");
    punish = sess.run(sess_v)
    
    cfObj["W"] = W;
    cfObj["b"] = b;
    cfObj["z"] = z;
    cfObj["punish"] = punish;
    cfObj["relation"] = "a' = f (a * W + b)"
    sess_v = sess.graph.get_tensor_by_name("train/train_times:0");
    train_times = sess.run(sess_v)
    cfObj["train_times"] = train_times;
    return cfObj
