# coding=UTF-8
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from RCNN_noe import RNN_Model
import data_helper_label

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_integer('batch_size', 100, 'the batch_size of the training procedure')
flags.DEFINE_float('lr', 0.1 , 'the learning rate')
flags.DEFINE_float('lr_decay', 0.8, 'the learning rate decay')
flags.DEFINE_integer('vocabulary_size', 27633, 'vocabulary_size')
flags.DEFINE_integer('embedding_dim', 400, 'embedding dim')
flags.DEFINE_integer('hidden_neural_size', 100, 'hidde neural size')
flags.DEFINE_integer('hidden_neural_size2', 10, 'hidde neural size2')

flags.DEFINE_integer('hidden_layer_num', 1, 'LSTM hidden layer num')

# flags.DEFINE_string('dataset_path','data/subj0.pkl','dataset path')
flags.DEFINE_string('embedFile', 'E:/学习/王/data/wikivec1.bin', 'embedpath')

flags.DEFINE_string('dataset_path', 'data/trainlable1.txt', 'dataset path')
flags.DEFINE_integer('max_len', 66, 'max_len of training sentence')
# Convolutional layer hyper-parameters
flags.DEFINE_string("filter_sizes", "3", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 300, "Number of filters per filter size (default: 128)")

flags.DEFINE_integer('valid_num', 100, 'epoch num of validation')  # validation interval, in epochs
flags.DEFINE_integer('checkpoint_num', 400, 'epoch num of checkpoint')  # checkpoint interval, in epochs
flags.DEFINE_float('init_scale', 0.1, 'init scale')  # range for uniform weight initialization
flags.DEFINE_integer('class_num', 17, 'class num')
flags.DEFINE_integer('class_num_2', 2, 'class num two')
flags.DEFINE_float('keep_prob', 0.65, 'dropout rate')  # dropout: probability that a unit is KEPT
flags.DEFINE_integer('num_epoch', 1000, 'num epoch')  # total number of training epochs
flags.DEFINE_integer('max_decay_epoch', 10, 'num epoch')  # epoch after which the learning rate starts to decay
flags.DEFINE_integer('max_grad_norm', 5, 'max_grad_norm')  # gradient-clipping norm (limits gradient explosion)
flags.DEFINE_string('out_dir', os.path.abspath(os.path.join("E:/学习/王/data/", "RelationClassify/runs_2")), 'out directory')  # output directory
flags.DEFINE_integer('check_point_every', 10, 'checkpoint every num epoch')  # save a checkpoint every N epochs


flags.DEFINE_integer('PF_size',50,"Position feature size")

flags.DEFINE_bool("restore", False,"contution tarin or not")
flags.DEFINE_bool("wordembedding_only", False,"using only word vector or content position vector")
flags.DEFINE_bool("get_entity_only",False,"only output two entities")
flags.DEFINE_bool("lstm_all",False,"output all time of lstm")

# NOTE(review): module-level file handle opened at import time and never
# closed here; train_step() opens its own handle to the same file.
f=open('result.txt','a')

class Config(object):
    """Hyper-parameter container: a class-level snapshot of the command-line FLAGS."""
    hidden_neural_size = FLAGS.hidden_neural_size
    hidden_neural_size2 = FLAGS.hidden_neural_size2

    vocabulary_size = FLAGS.vocabulary_size
    embed_dim = FLAGS.embedding_dim
    hidden_layer_num = FLAGS.hidden_layer_num
    embedFile = FLAGS.embedFile

    filter_sizes = FLAGS.filter_sizes
    num_filters = FLAGS.num_filters

    class_num = FLAGS.class_num  # number of target classes
    class_num_2=FLAGS.class_num_2
    keep_prob = FLAGS.keep_prob  # dropout keep probability
    lr = FLAGS.lr
    lr_decay = FLAGS.lr_decay
    batch_size = FLAGS.batch_size
    num_step = FLAGS.max_len  # sentence truncation length
    max_grad_norm = FLAGS.max_grad_norm  # gradient-clipping norm (limits gradient explosion)
    num_epoch = FLAGS.num_epoch
    max_decay_epoch = FLAGS.max_decay_epoch  # epoch at which learning-rate decay kicks in
    valid_num = FLAGS.valid_num
    out_dir = FLAGS.out_dir
    checkpoint_every = FLAGS.check_point_every

    PF_size=FLAGS.PF_size

    restore=FLAGS.restore
    wordembedding_only=FLAGS.wordembedding_only
    get_entity_only=FLAGS.get_entity_only
    lstm_all=FLAGS.lstm_all


# Evaluation routine (used for validation / testing).
def evaluate(model, session, data, global_steps = None, summary_writer = None):
    """Evaluate ``model`` on ``data`` and report accuracy and macro P/R/F1.

    Args:
        model: an RNN_Model built with is_training=False.
        session: the tf.Session holding the trained variables.
        data: dataset accepted by data_helper_label.batch_iter.
        global_steps: step index recorded with the dev summary (optional).
        summary_writer: optional tf.summary.FileWriter for dev accuracy.

    Returns:
        (final_accuracy, final_precision, final_recall, F1, logits) where
        ``logits`` are the raw scores of the LAST evaluated batch
        (None when ``data`` yields no batches).
    """
    total_accuracy = 0.0
    total_cost = 0.0
    iters = 0
    logits = None  # BUGFIX: initialized so an empty dataset cannot raise NameError

    # Per-class counters; the 1e-10 offsets guard the divisions below
    # against classes that were never predicted / never present.
    TPclass = [0 for _ in range(Config.class_num)]             # true positives
    TFPclass = [0.0 + 1e-10 for _ in range(Config.class_num)]  # TP + FP (predicted count)
    TFNclass = [0.0 + 1e-10 for _ in range(Config.class_num)]  # TP + FN (gold count)

    for x, y, z, z_label, d, entity_pos in data_helper_label.batch_iter(data, batch_size = FLAGS.batch_size):
        feed_dict = {
            model.input_data: x,               # input token ids
            model.input_data_label: z,
            model.input_label: z_label,
            model.position: d,                 # position feature vectors
            model.target: y,                   # one-hot gold labels
            model.entity_posi: entity_pos,
        }

        # Feed the model's initial LSTM states for both directions.
        state_fw = session.run(model.initial_state_fw)
        state_bw = session.run(model.initial_state_bw)
        for i, (fw_c, fw_h) in enumerate(model.initial_state_fw):
            feed_dict[fw_c] = state_fw[i].c
            feed_dict[fw_h] = state_fw[i].h
        for i, (bw_c, bw_h) in enumerate(model.initial_state_bw):
            feed_dict[bw_c] = state_bw[i].c
            feed_dict[bw_h] = state_bw[i].h

        cost, accuracy, prediction, logits = session.run(
            [model.loss, model.accuracy, model.prediction, model.logits], feed_dict)

        total_accuracy += accuracy
        total_cost += cost
        iters += 1

        # BUGFIX: the original ran session.run(tf.argmax(y, 1)) here, adding a
        # new op to the graph on EVERY batch (unbounded graph growth and
        # slowdown). ``y`` is a plain numpy feed value, so numpy gives the
        # identical result with no graph mutation.
        target = np.argmax(y, axis=1)

        for j in range(Config.batch_size):
            pred_c = int(prediction[j])
            true_c = int(target[j])
            if 0 <= pred_c < Config.class_num:
                TFPclass[pred_c] += 1       # predicted as class pred_c
                if pred_c == true_c:
                    TPclass[pred_c] += 1    # ... and correctly so
            if 0 <= true_c < Config.class_num:
                TFNclass[true_c] += 1       # gold-label occurrence count

    precision = [float(tp) / denom for tp, denom in zip(TPclass, TFPclass)]
    recall = [float(tp) / denom for tp, denom in zip(TPclass, TFNclass)]
    print("一周期后的精确度and召回率and F1 计算......")
    print(precision)
    print(recall)

    # BUGFIX: the original used session.run(tf.reduce_sum(...)) on these pure
    # Python lists, creating yet more graph ops per call; plain sum() is exact.
    final_precision = sum(precision) / (Config.class_num)
    final_recall = sum(recall) / (Config.class_num)

    # BUGFIX: guard against 0/0 when nothing was ever predicted correctly.
    pr_sum = final_precision + final_recall
    F1 = 2 * final_precision * final_recall / pr_sum if pr_sum > 0 else 0.0

    print("pre:%f rec:%f F1:%f" % (final_precision, final_recall, F1))

    final_cost = total_cost / max(iters, 1)
    final_accuracy = total_accuracy / max(iters, 1)
    if summary_writer:
        # BUGFIX: build a Summary protobuf directly instead of adding a new
        # tf.summary.scalar op to the graph on every evaluation call.
        dev_summary = tf.Summary(value=[
            tf.Summary.Value(tag='dev_accuracy', simple_value=float(final_accuracy))])
        summary_writer.add_summary(dev_summary, global_steps)
        summary_writer.flush()

    print ("一个周期后：cost %f accuracy %f iters %d" % (final_cost, final_accuracy, iters))

    return final_accuracy, final_precision, final_recall, F1, logits


# Run one full training pass over the data, updating the model parameters.
def run_epoch(model, session, data, train_op, global_steps, train_summary_writer=None, valid_summary_writer = None):
    """Run a single training epoch over ``data``.

    For every batch: build the feed dict, feed the bidirectional LSTM
    initial states obtained from the model's initial-state ops, and run
    ``train_op`` once.

    Returns:
        The global step counter, incremented once per batch.
    """
    batches = data_helper_label.batch_iter(data, batch_size=FLAGS.batch_size)
    for x, y, z, z_label, d, entity_posi in batches:
        feed_dict = {
            model.input_data: x,              # input token ids
            model.input_data_label: z,
            model.input_label: z_label,
            model.position: d,                # position feature vectors
            model.target: y,                  # one-hot target labels
            model.entity_posi: entity_posi,
        }

        # Fetch the initial LSTM states and feed them placeholder-by-placeholder.
        initial_fw = session.run(model.initial_state_fw)
        initial_bw = session.run(model.initial_state_bw)
        for layer, (c_ph, h_ph) in enumerate(model.initial_state_fw):
            feed_dict[c_ph] = initial_fw[layer].c
            feed_dict[h_ph] = initial_fw[layer].h
        for layer, (c_ph, h_ph) in enumerate(model.initial_state_bw):
            feed_dict[c_ph] = initial_bw[layer].c
            feed_dict[h_ph] = initial_bw[layer].h

        # Execute one optimization step; loss/accuracy/summary are fetched
        # alongside so the training op runs exactly once per batch.
        cost, accuracy, _, summary = session.run(
            [model.loss, model.accuracy, train_op, model.summary],
            feed_dict=feed_dict)

        global_steps += 1
    return global_steps




# Training driver: builds the graph, runs the epoch loop, logs metrics.
def train_step():
    """Load the data, build train/eval models sharing one variable scope,
    run the training loop with learning-rate decay, checkpoint periodically,
    and log per-epoch precision/recall/F1 to result.txt."""
    print("loading the dataset...")

    config = Config()
    eval_config = Config()
    eval_config.keep_prob = 1.0  # disable dropout during evaluation


    trainfile = 'E:/学习/王/data/word_trainlable1.txt'

    testfile = 'E:/学习/王/data/word_testlable1.txt'



    train_data, test_data = data_helper_label.load_data(FLAGS.max_len,
                                                                    hidden_neural_size = FLAGS.hidden_neural_size,
                                                                    trainfile = trainfile,
                                                                    testfile = testfile)  # load training and test data

    print("begin training")
    #f = file('result.txt', 'a')
    # NOTE(review): shadows the module-level `f`; not closed if an exception
    # is raised before the end of this function.
    f=open('./result.txt','a')


    # gpu_config=tf.ConfigProto()
    # gpu_config.gpu_options.allow_growth=True
    with tf.Graph().as_default(), tf.Session() as session:

        initializer = tf.random_uniform_initializer(-1 * FLAGS.init_scale, 1 * FLAGS.init_scale)  # uniform random weight initialization
        with tf.variable_scope("model", reuse = None, initializer = initializer):
            train_model = RNN_Model(config = config, is_training = True)  # build the training model
        with tf.variable_scope("model", reuse = True, initializer = initializer):
            # Evaluation models reuse (share) the training model's variables,
            # so they always see the latest trained parameters.
            valid_model = RNN_Model(config = eval_config, is_training = False)
            test_model = RNN_Model(config = eval_config, is_training = False)

        # add summary
        # train_summary_op = tf.merge_summary([model.loss_summary,model.accuracy])
        # train_summary_dir = os.path.join(config.out_dir, "summaries", "train")
        # train_summary_writer = tf.summary.FileWriter(train_summary_dir, session.graph)

        # dev_summary_op = tf.merge_summary([valid_model.loss_summary,valid_model.accuracy])
        dev_summary_dir = os.path.join(eval_config.out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, session.graph)

        # add checkpoint
        checkpoint_dir = os.path.abspath(os.path.join(config.out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        # NOTE(review): tf.all_variables() is deprecated in favor of
        # tf.global_variables() — confirm against the TF version in use.
        saver = tf.train.Saver(tf.all_variables())
        tf.global_variables_initializer().run()
        #tf.initialize_all_variables().run()
        global_steps = 1
        begin_time = int(time.time())
        # no_op=session.run(tf.no_op)
        if config.restore:
            # Continue training from the most recent checkpoint.
            save_path = tf.train.latest_checkpoint(checkpoint_dir)
            saver.restore(session, save_path)
        m=0.0
        epoch=0
        pre,rec,f1=0.0,0.0,0.0  # best precision / recall / F1 observed so far
        for i in range(config.num_epoch):

            print("the %d epoch training..." % (i + 1))
            # Exponential learning-rate decay after max_decay_epoch,
            # floored at 0.01.
            lr_decay = config.lr_decay ** max(i - config.max_decay_epoch, 0.0)
            if(config.lr*lr_decay<0.01):
                lr_new=0.01
                train_model.assign_new_lr(session,lr_new)
            else:

                train_model.assign_new_lr(session, config.lr * lr_decay)
            # train_data,valid_data,test_data=data_helper.load_data(FLAGS.max_len,batch_size=config.batch_size)#加载数据/训练数据，验证数据，测试数据

            global_steps = run_epoch(train_model, session, train_data, train_model.train_op, global_steps)
            #if i % 10 == 0:
            if i >0:
                # Evaluate on the test set every epoch (after the first) and
                # keep track of the best F1 seen so far.
                final_accary,final_precision, final_recall, F1,_ = evaluate(test_model, session, test_data,global_steps,dev_summary_writer)
                if F1 > f1:
                    epoch=i+1
                    pre=final_precision
                    rec=final_recall
                    f1=F1
                    print('目前F1最大周期：%r pre:%f rec:%f F1:%f' % (epoch,pre,rec,f1))
                else:
                    print('目前F1最大周期：%r pre:%f rec:%f F1:%f' % (epoch, pre, rec, f1))
                s1="pre: "+str(final_precision)
                s2="rec: "+str(final_recall)
                s3="F1: "+str(F1)
                s="第"+str(i)+"个周期"
                f.write(s)
                f.write(s1)
                f.write('\n')
                f.write(s2)
                f.write('\n')
                f.write(s3)
                f.write('\n')

                #print('准确率: %f' % final_accary)

            if i % config.checkpoint_every == 0:
                # Periodic checkpoint; the third positional arg is global_step.
                path = saver.save(session, checkpoint_prefix, global_steps)
                print("Saved model chechpoint to{}\n".format(path))

        print("the train is finished")
        end_time = int(time.time())
        print("training takes %d seconds already\n" % (end_time - begin_time))
        test_accuracy,test_precision, test_recall, F1,test_logins = evaluate(test_model, session, test_data)
        print ("预测打分为： %r" % test_logins)
        f.write(str(test_logins))
        f.close()

        print("the test data accuracy is %f" % test_accuracy)
        print("program end!")


def main(_):
    """Entry point invoked by ``tf.app.run`` after flag parsing."""
    train_step()


if __name__ == "__main__":
    tf.app.run()  # parses command-line flags, then calls main()