#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Time    :   2021/06/06 16:11:48
@Author  :   Leo Wood 
@Contact :   leowood@foxmail.com
@Disc    :   分类模型启动脚本
'''

import re
import tensorflow as tf
import numpy as np
import codecs
import pickle
import os
from datetime import datetime
from Seg_Sents_En_Z import seg_sens

from flask import Flask,request

import argparse

app = Flask(__name__)

from bert_base.bert import tokenization, modeling
from bert_base.train.models import create_classification_model

# Command-line configuration for the classification API server.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)


# Server and model parameters

parser.add_argument("--api_server_ip", type=str, default='127.0.0.1',
                    help="API Server IP,默认为本机127.0.0.1")  # bind address for Flask

parser.add_argument("--api_server_port", type=int, required=True,
                    help="api_server端口号")  # listen port (required)

parser.add_argument("--bert_dir", type=str, required=True,
                    help="BERT模型路径")  # dir containing bert_config.json and vocab.txt

parser.add_argument("--model_dir", type=str, required=True,
                    help="分类模型路径")  # dir containing the fine-tuned classifier checkpoint

parser.add_argument("--max_seq_length", type=int, default=128,
                    help="分类模型最大序列长度")  # fixed input length incl. [CLS]/[SEP]

parser.add_argument("--gpu_rank", type=str, default='0',
                    help="gpu序号")  # value written to CUDA_VISIBLE_DEVICES


args = parser.parse_args()

# Restrict TensorFlow to the requested GPU(s) before any session is created.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_rank

DO_LOWER_CASE = False  # passed to FullTokenizer: input is NOT lower-cased (cased vocab)
MAX_SEQ_LENGTH = args.max_seq_length

model_dir = args.model_dir
bert_dir = args.bert_dir


# ####
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# DO_LOWER_CASE = False
# MAX_SEQ_LENGTH = 128

# model_dir = "models/question"
# bert_dir = "/data/leo/Projects/bert/models/cased_L-12_H-768_A-12"
# ####


# --- Runtime globals -------------------------------------------------------
batch_size = 1              # online prediction: one sentence per session run
is_training = False         # inference only
use_one_hot_embeddings = False

# Let the GPU allocator grow on demand instead of grabbing all memory up front.
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
sess = tf.Session(config=gpu_config)
model = None

# Placeholders are created later when the graph is built; label/segment
# placeholders stay None because the model is fed ids and mask only.
input_ids_p, input_mask_p, label_ids_p, segment_ids_p = None, None, None, None

# Fail fast when the fine-tuned checkpoint is missing.
print('checkpoint path:{}'.format(os.path.join(model_dir, "checkpoint")))
if not os.path.exists(os.path.join(model_dir, "checkpoint")):
    raise Exception("failed to get checkpoint. going to return ")

# Binary classification label maps.
label2id = {"0": 0, "1": 1}
id2label = {value: key for key, value in label2id.items()}
label_list = label2id.keys()
num_labels = len(label_list)


# Build the classification graph once at startup and restore the fine-tuned
# weights into the long-lived session used by all request handlers.
graph = tf.get_default_graph()
with graph.as_default():
    print("going to restore checkpoint")
    # Fixed-shape placeholders: one example of MAX_SEQ_LENGTH token ids.
    input_ids_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_ids")
    input_mask_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_mask")

    bert_config = modeling.BertConfig.from_json_file(os.path.join(bert_dir, 'bert_config.json'))

    # Only `probabilities` is consumed at inference time; loss/logits are unused.
    loss, per_example_loss, logits, probabilities = create_classification_model(
        bert_config=bert_config, is_training=False,
        input_ids=input_ids_p, input_mask=input_mask_p, segment_ids=None,
        labels=None, num_labels=num_labels)

    # A single Saver suffices (the original code constructed two identical
    # Savers, adding redundant save/restore ops to the graph).
    saver = tf.train.Saver()

    print("model_dir: ", model_dir)
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))


# WordPiece tokenizer built from the BERT vocabulary (cased: DO_LOWER_CASE=False).
tokenizer = tokenization.FullTokenizer(
        vocab_file=os.path.join(bert_dir, 'vocab.txt'), do_lower_case=DO_LOWER_CASE)


class InputFeatures(object):
    """One tokenized, padded example ready to be fed to the model.

    Carries the padded token ids, attention mask and segment ids produced
    by `convert_single_example` (labels are not used at inference time).
    """

    def __init__(self, input_ids, input_mask, segment_ids, ):
        # All three sequences are expected to be max_seq_length long.
        self.input_ids, self.input_mask, self.segment_ids = (
            input_ids, input_mask, segment_ids)

def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
    """Turn one pre-tokenized example into fixed-length model inputs.

    :param ex_index: example index (kept for API compatibility; unused here).
    :param example: list of WordPiece tokens for a single sentence.
    :param label_list: iterable of label names; written once to label2id.pkl.
    :param max_seq_length: fixed output length including [CLS] and [SEP].
    :param tokenizer: FullTokenizer mapping tokens to vocabulary ids.
    :return: InputFeatures whose input_ids / input_mask / segment_ids are
             each exactly `max_seq_length` long.
    """
    # Persist the label->index map once (NOTE: indexed from 1 here, while the
    # module-level label2id uses 0/1 — kept as-is for compatibility).
    label_map = {label: idx for idx, label in enumerate(label_list, 1)}
    pkl_path = os.path.join(model_dir, 'label2id.pkl')
    if not os.path.exists(pkl_path):
        with codecs.open(pkl_path, 'wb') as w:
            pickle.dump(label_map, w)

    # Reserve two positions for the [CLS]/[SEP] markers.
    tokens = example
    if len(tokens) >= max_seq_length - 1:
        tokens = tokens[0:(max_seq_length - 2)]

    # Wrap the sentence with the BERT special tokens; single-segment input.
    ntokens = ["[CLS]"] + list(tokens) + ["[SEP]"]
    segment_ids = [0] * len(ntokens)

    input_ids = tokenizer.convert_tokens_to_ids(ntokens)
    input_mask = [1] * len(input_ids)

    # Right-pad everything up to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids += [0] * pad
    input_mask += [0] * pad
    segment_ids += [0] * pad
    ntokens += ["**NULL**"] * pad

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    return InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
    )

# Kept from the upstream BERT reference code: this helper is not called in
# this file, but external notebooks/scripts may still depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    features = []
    for ex_index, example in enumerate(examples):
        # Periodic progress logging for large example sets.
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
        features.append(
            convert_single_example(ex_index, example, label_list,
                                   max_seq_length, tokenizer))
    return features




def predict(text):
    """
    Classify one sentence online with the restored BERT model.

    :param text: raw sentence string.
    :return: ('1', prob) when the positive class wins, else ('0', prob),
             where prob is the winning class probability.
    """
    global graph
    with graph.as_default():
        # Tokenize and pack into fixed-length inputs. Reuses the module-level
        # `convert` helper — the original had an identical inner copy — and
        # drops an unused `datetime.now()` timer.
        tokens = tokenizer.tokenize(text)
        input_ids, input_mask, segment_ids = convert(tokens)

        # segment_ids is not fed: the graph was built without segment inputs.
        feed_dict = {input_ids_p: input_ids,
                     input_mask_p: input_mask}
        prob = sess.run([probabilities], feed_dict)

        # prob is [array of shape (1, 2)]; compare the two class scores.
        result = prob[0].tolist()[0]
        if result[0] < result[1]:
            return '1', result[1]
        else:
            return '0', result[0]
      
def convert_lines_to_batches(lines, size=None):
    """Split `lines` into consecutive batches.

    :param lines: list of items to batch.
    :param size: batch size; defaults to the module-level `batch_size`
                 (backward-compatible generalization of the hard-coded global).
    :return: list of batches. Inputs no longer than `size` — including an
             empty list — come back as a single batch, matching the original
             behavior; the last batch holds any remainder.
    """
    if size is None:
        size = batch_size
    if len(lines) <= size:
        return [lines]
    # Slice off `size` items at a time instead of repeatedly re-slicing
    # the source list inside a while loop.
    return [lines[i:i + size] for i in range(0, len(lines), size)]


# def predict_lines(lines):
#     """
#     do online prediction for multiple lines

#     :param line: a list. 
#     :return:
#     """
#     batches = convert_lines_to_batches(lines)


#     global graph
#     with graph.as_default():
        
#         for batch in batches:
#             num = len(batch)
#             input_ids_temp = []
#             input_mask_temp = []
#             segment_ids_temp = []
#             for line in batch:
#                 feature = convert_single_example(0, line, label_list, MAX_SEQ_LENGTH, tokenizer)
#                 input_ids_temp += feature.input_ids
#                 input_mask_temp += feature.input_mask
#                 segment_ids_temp += feature.segment_ids

#             input_ids = np.reshape([input_ids_temp],(num, MAX_SEQ_LENGTH))
#             input_mask = np.reshape([input_mask_temp],(num, MAX_SEQ_LENGTH))
#             segment_ids = np.reshape([segment_ids_temp],(num, MAX_SEQ_LENGTH))


#             feed_dict = {input_ids_p: input_ids,
#                             input_mask_p: input_mask}

#             # run session get current feed_dict result
#             prob = sess.run([probabilities], feed_dict)
#             print(prob)


            # # print("prob: ", prob)

            # result = prob[0].tolist()[0]
            # # print(result)
            # if result[0] < result[1]:
            #     return '1',result[1]
            # else:
            #     return '0',result[0]

def convert(line):
    """Pack one list of tokens into (batch_size, MAX_SEQ_LENGTH) input arrays.

    :param line: list of WordPiece tokens for a single sentence.
    :return: (input_ids, input_mask, segment_ids) numpy arrays ready to feed.
    """
    feature = convert_single_example(0, line, label_list, MAX_SEQ_LENGTH, tokenizer)
    shape = (batch_size, MAX_SEQ_LENGTH)
    input_ids = np.asarray(feature.input_ids).reshape(shape)
    input_mask = np.asarray(feature.input_mask).reshape(shape)
    segment_ids = np.asarray(feature.segment_ids).reshape(shape)
    return input_ids, input_mask, segment_ids


def predict_lines(text):
    """Split `text` into sentences and return those classified positive.

    Each sentence is tokenized and scored individually; a sentence is kept
    when the class-"1" probability exceeds the class-"0" probability.

    :param text: raw paragraph string.
    :return: list of sentences predicted as the positive class.
    """
    sentences = seg_sens(text)
    global graph
    with graph.as_default():
        kept = []
        for sentence in sentences:
            tokens = tokenizer.tokenize(sentence)
            input_ids, input_mask, segment_ids = convert(tokens)

            # segment_ids is unused: the graph takes ids and mask only.
            prob = sess.run([probabilities],
                            {input_ids_p: input_ids, input_mask_p: input_mask})
            scores = prob[0].tolist()[0]
            if scores[0] < scores[1]:
                kept.append(sentence)
        print(kept)
        return kept



@app.route('/Cla_Result', methods=['GET','POST'])
def Cla_Result():
    """Classify the sentences of `text` and return the positive ones.

    GET reads `text` from the query string, POST from the form body; both
    branches previously duplicated the identical predict-and-return logic,
    which is now unified.

    :return: JSON object {'gold_sens': [...]} with the positive sentences.
    """
    if request.method == 'POST':
        text = request.form["text"]
    else:  # GET
        text = request.args.get("text")

    sens = predict_lines(text)

    return {'gold_sens': sens}



if __name__ == "__main__":
    # Start the Flask API server on the host/port given on the command line.
    app.run(host=args.api_server_ip, port=args.api_server_port, debug=False)
