# -*- coding: utf-8 -*-

import codecs
from datetime import datetime
import pickle
import itertools
from collections import OrderedDict
import os
import re
import tensorflow as tf
import numpy as np
from model import Model
from loader import load_sentences, update_tag_scheme
from loader import char_mapping, tag_mapping
from loader import augment_with_pretrained, prepare_dataset
from utils import get_logger, make_path, clean, create_model, save_model
from utils import print_config, save_config, load_config, test_ner
from data_utils import load_word2vec, create_input, input_from_line, BatchManager

root_path = os.path.abspath(os.getcwd())

# Command-line flags (parsed by tf.app.run before main() is called).
flags = tf.app.flags
flags.DEFINE_boolean("clean", False, "clean train folder")
flags.DEFINE_boolean("train", True, "Whether train the model 训练还是验证")

# Configurations for the model.
flags.DEFINE_integer("seg_dim", 24, "Embedding size for segmentation, 0 if not used 分词特征转换的向量维度大小")
flags.DEFINE_integer("char_dim", 128, "Embedding size for characters 字向量的维度大小")
flags.DEFINE_integer("lstm_dim", 128,
                     "Num of hidden units in LSTM, or num of filters in IDCNN LSTM中神经元大小或者CNN中卷积核的数目")
flags.DEFINE_string("tag_schema", "iobes", "tagging schema iobes or iob 标签类型")

# Configurations for training.
flags.DEFINE_float("clip", 5, "Gradient clip")
flags.DEFINE_float("dropout", 0.3, "Dropout rate")
# BUG FIX: batch_size is a count, not a real number — declare it as an
# integer flag (was DEFINE_float), consistent with the other count flags.
flags.DEFINE_integer("batch_size", 20, "batch size")
flags.DEFINE_float("lr", 0.0001, "Initial learning rate")
flags.DEFINE_string("optimizer", "adam", "Optimizer for training")
flags.DEFINE_boolean("pre_emb", True, "Wither use pre-trained embedding")
flags.DEFINE_boolean("zeros", False, "Wither replace digits with zero")
flags.DEFINE_boolean("lower", False, "Wither lower case")
flags.DEFINE_integer("max_epoch", 100, "maximum training epochs")
flags.DEFINE_integer("steps_check", 100, "steps per checkpoint")
flags.DEFINE_string("summary_path", "summary", "Path to store summaries")
flags.DEFINE_string("log_file", "./log/train.log", "File for log")
flags.DEFINE_string("map_file", "./config/maps.pkl", "file for maps")
flags.DEFINE_string("vocab_file", "vocab.json", "File for vocab")
flags.DEFINE_string("config_file", "./config/config_file", "File for config")
flags.DEFINE_string("script", "conlleval", "evaluation script")
flags.DEFINE_string("result_path", "result", "Path for results")
flags.DEFINE_string("emb_file", os.path.join(root_path, "data", "vec.txt"), "Path for pre_trained embedding")
flags.DEFINE_string("train_file", os.path.join(root_path, "data", "example.train"), "Path for train data")
flags.DEFINE_string("dev_file", os.path.join(root_path, "data", "example.dev"), "Path for dev data")
flags.DEFINE_string("test_file", os.path.join(root_path, "data", "example.test"), "Path for test data")

# Checkpoint path and feature-extractor type must agree (idcnn vs bilstm).
# flags.DEFINE_string("ckpt_path", "./ckpt/idcnn", "Path to save model")
flags.DEFINE_string("ckpt_path", "./ckpt/bilstm", "Path to save model")
# flags.DEFINE_string("model_type", "idcnn", "Model type, can be idcnn or bilstm 特征提取网络的类型")
flags.DEFINE_string("model_type", "bilstm", "Model type, can be idcnn or bilstm 特征提取网络的类型")

FLAGS = tf.app.flags.FLAGS
# Sanity-check hyper-parameters before anything else runs.
assert FLAGS.clip < 5.1, "gradient clip should't be too much"
assert 0 <= FLAGS.dropout < 1, "dropout rate between 0 and 1"
assert FLAGS.lr > 0, "learning rate must larger than zero"
assert FLAGS.optimizer in ["adam", "sgd", "adagrad"]

# Logger shared by all functions in this module (writes to FLAGS.log_file).
logger = get_logger(FLAGS.log_file)


# config for the model
def config_model(char_to_id, tag_to_id):
    """Build the model hyper-parameter configuration from FLAGS.

    Args:
        char_to_id: dict mapping characters to integer ids (vocabulary).
        tag_to_id: dict mapping NER tags to integer ids.

    Returns:
        An OrderedDict of configuration entries, in a fixed order so the
        saved config file is stable across runs.
    """
    return OrderedDict([
        ("model_type", FLAGS.model_type),
        ("num_chars", len(char_to_id)),
        ("char_dim", FLAGS.char_dim),
        ("num_tags", len(tag_to_id)),
        ("seg_dim", FLAGS.seg_dim),
        ("lstm_dim", FLAGS.lstm_dim),
        ("batch_size", FLAGS.batch_size),
        ("emb_file", FLAGS.emb_file),
        ("clip", FLAGS.clip),
        # The model consumes a keep-probability, the flag stores a drop rate.
        ("dropout_keep", 1.0 - FLAGS.dropout),
        ("optimizer", FLAGS.optimizer),
        ("lr", FLAGS.lr),
        ("tag_schema", FLAGS.tag_schema),
        ("pre_emb", FLAGS.pre_emb),
        ("zeros", FLAGS.zeros),
        ("lower", FLAGS.lower),
    ])


def evaluate(sess, model, name, data, id_to_tag, logger):
    """Run NER evaluation on a data split and track the best F1 so far.

    Args:
        sess: active TensorFlow session.
        model: model instance exposing evaluate() and best_dev_f1 /
            best_test_f1 variables.
        name: split name, must be "dev" or "test".
        data: BatchManager for the split.
        id_to_tag: mapping from tag ids back to tag strings.
        logger: logger used for the conlleval report lines.

    Returns:
        True if this evaluation produced a new best F1 for the split.

    Raises:
        ValueError: if name is neither "dev" nor "test" (the original code
        silently returned None in that case).
    """
    logger.info("evaluate:{}".format(name))
    ner_results = model.evaluate(sess, data, id_to_tag)
    eval_lines = test_ner(ner_results, FLAGS.result_path)
    for line in eval_lines:
        logger.info(line)
    # Overall F1 is the last field of the second conlleval summary line.
    f1 = float(eval_lines[1].strip().split()[-1])

    # Single code path for both splits (the two branches were duplicates
    # differing only in which best-F1 variable they touched).
    if name == "dev":
        best_var = model.best_dev_f1
    elif name == "test":
        best_var = model.best_test_f1
    else:
        raise ValueError("name must be 'dev' or 'test', got {!r}".format(name))

    best_f1 = best_var.eval()
    if f1 > best_f1:
        # Persist the new best score into the graph variable.
        tf.assign(best_var, f1).eval()
        logger.info("new best {} f1 score:{:>.3f}".format(name, f1))
    return f1 > best_f1


def train():
    """Full training pipeline: load data, build char/tag maps, construct the
    model and run the training loop with periodic logging, checkpointing and
    dev evaluation, followed by a final test evaluation."""
    logger.info("load train/dev/test sentences....")
    # Each sentence is a list of [char, tag] pairs,
    # e.g. [['请', 'O'], ['双', 'O'], ['击', 'O'], ...] loaded from data/example.*
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    logger.info("update ner tagging schema of：{}".format(FLAGS.tag_schema))
    # Normalize tags to the selected scheme (IOB: entity start B, inside I,
    # outside O; IOBES additionally marks End and Single).
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(dev_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # Create char<->id and tag<->id mappings, or reload cached ones.
    if not os.path.isfile(FLAGS.map_file):
        if FLAGS.pre_emb:
            # Extend the training vocabulary with characters that appear in
            # the pre-trained embedding file.
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in train_sentences])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # Create a dictionary and a mapping for tags.
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # Convert characters (X) and tags (Y) to integer id sequences.
    logger.info("Convert text data to idx data....")
    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    test_data = prepare_dataset(
        test_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    # BUG FIX: report the real dev/test sizes (they were hard-coded to 0).
    print("%i / %i / %i sentences in train / dev / test." % (
        len(train_data), len(dev_data), len(test_data)))

    logger.info("Create the batch manager object.....")
    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    logger.info("Fetch the model config parameters.....")
    # Make paths for logs/models if they do not exist.
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    print_config(config, logger)

    # Limit GPU memory: grow allocation on demand, allow CPU fallback.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.allow_soft_placement = True

    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        logger.info("Build the model....")
        model = create_model(sess, Model, FLAGS.ckpt_path,
                             load_word2vec, config, id_to_char, logger)

        logger.info("Start training.....")
        loss = []
        step = 1
        with tf.device("/gpu:0"):
            for i in range(800):
                b_step = 1  # batch index within the current epoch
                for batch in train_manager.iter_batch(shuffle=True):
                    _, batch_loss = model.run_step(sess, True, batch)
                    loss.append(batch_loss)
                    if step % 10 == 0:
                        logger.info("epoch:{},step:{}/{},loss:{}".format(i+1, b_step, steps_per_epoch, batch_loss))

                    if step % 100 == 0:
                        # Checkpoint every 100 steps, then evaluate on dev.
                        logger.info('Save the model to {},mean_loss:{}'.format(FLAGS.ckpt_path, np.mean(loss)))
                        save_model(sess, model, FLAGS.ckpt_path, logger, str(np.mean(loss)))
                        loss = []
                        evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
                    b_step += 1
                    # BUG FIX: the global step must advance once per batch.
                    # It used to be incremented once per epoch (outside this
                    # loop), so the % 10 / % 100 checks above fired for every
                    # batch of entire epochs instead of every 10/100 steps.
                    step += 1

            # Final evaluation on the held-out test split.
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)

def evaluate_line():
    """Interactive inference loop: restore the trained model, then repeatedly
    read a sentence from stdin and print its predicted entities."""
    config = load_config(FLAGS.config_file)
    # Limit GPU memory: grow allocation on demand, allow CPU fallback.
    tf_config = tf.ConfigProto()
    tf_config.allow_soft_placement = True
    tf_config.gpu_options.allow_growth = True
    # Restore the char/tag <-> id mappings produced at training time.
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        while True:
            line = input("请输入测试句子:")
            if not line.strip():
                print("不能输入空!!!!")
                continue
            # Strip interior spaces before tagging.
            cleaned = re.sub(' ', '', line)
            result = model.evaluate_line(sess, input_from_line(cleaned, char_to_id), id_to_tag)
            print(result)

def main(_):
    """tf.app entry point: dispatch to training or interactive evaluation
    based on the --train flag (the unused argument is argv from tf.app.run)."""
    if not FLAGS.train:
        logger.info('Start call evaluate_line method....')
        evaluate_line()
        return
    if FLAGS.clean:
        # Wipe previous checkpoints/logs before retraining.
        logger.info('Clean the data folder and cache...')
        clean(FLAGS)
    logger.info('Start call train method....')
    train()


if __name__ == "__main__":
    # Run main() via tf.app.run, which parses the FLAGS defined above first.
    tf.app.run(main)
