# encoding=utf-8

"""
基于命令行的在线预测方法
@Author: Macan (ma_cancan@163.com) 
"""


import re
import tensorflow as tf
import numpy as np
import codecs
import pickle
import os
from datetime import datetime

from flask import Flask,request

app = Flask(__name__)

from bert_base.bert import tokenization, modeling



## some parameters
# NOTE(review): DO_LOWER_CASE=False is consistent with the *cased* checkpoint
# in bert_dir below — confirm if the checkpoint ever changes.
DO_LOWER_CASE = False
MAX_SEQ_LENGTH = 128


# Hard-coded deployment paths: fine-tuned checkpoint dir and pretrained BERT dir.
model_dir = '/data/leo/Projects/Sci_Engine/resource/question_ner/output'
bert_dir = '/data/leo/Projects/bert/models/cased_L-12_H-768_A-12'


# Inference-only settings; batch_size also fixes the placeholder shapes below.
is_training=False
use_one_hot_embeddings=False
batch_size=8

# Let GPU memory grow on demand instead of grabbing it all at session start.
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
sess=tf.Session(config=gpu_config)
model=None

# NOTE(review): a `global` statement at module level is a no-op; kept as-is.
global graph
input_ids_p, input_mask_p, label_ids_p, segment_ids_p = None, None, None, None


# Fail fast at import time if the fine-tuned checkpoint is missing.
print('checkpoint path:{}'.format(os.path.join(model_dir, "checkpoint")))
if not os.path.exists(os.path.join(model_dir, "checkpoint")):
    raise Exception("failed to get checkpoint. going to return ")

# Binary label set; id2label inverts the mapping for decoding predictions.
label2id = {"0":0, "1":1}
id2label = {value: key for key, value in label2id.items()}
label_list = label2id.keys()
num_labels = len(label_list)

# Build the inference graph once at import time and restore the checkpoint.
graph = tf.get_default_graph()
with graph.as_default():
    print("going to restore checkpoint")
    #sess.run(tf.global_variables_initializer())
    # Fixed-shape placeholders: every request must be padded/reshaped to
    # (batch_size, MAX_SEQ_LENGTH) before being fed.
    input_ids_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_ids")
    input_mask_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_mask")

    bert_config = modeling.BertConfig.from_json_file(os.path.join(bert_dir, 'bert_config.json'))

    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids_p,
        input_mask=input_mask_p,
        token_type_ids=None, # defaults to a single-sentence task
        use_one_hot_embeddings=use_one_hot_embeddings
    )

    # embedding = model.get_pooled_output()

    # Sentence-level ([CLS] pooled) representation feeds a 2-way classifier head.
    output_layer = model.get_pooled_output()

    hidden_size = output_layer.shape[-1].value

    output_weights = tf.get_variable(
        "output_weights", [2, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
        "output_bias", [2], initializer=tf.zeros_initializer())

    with tf.variable_scope("loss"):
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=-1)
        log_probs = tf.nn.log_softmax(logits, axis=-1)


    # NOTE(review): the prediction functions below fetch a tensor named
    # `pred_ids`, but no such name is ever defined in this graph (only
    # `probabilities`/`logits` exist) — confirm whether the commented-out
    # create_model call below was meant to provide it.

    # (total_loss, logits, trans, pred_ids) = create_model(
    #     bert_config=bert_config, is_training=False, input_ids=input_ids_p, input_mask=input_mask_p, segment_ids=None,
    #     labels=None, num_labels=num_labels, use_one_hot_embeddings=False, dropout_rate=1.0)

    saver = tf.train.Saver()

    print("model_dir: ",model_dir)

    # Restore the latest fine-tuned checkpoint into the live session.
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))


# WordPiece tokenizer built from the pretrained vocab; shared by all requests.
tokenizer = tokenization.FullTokenizer(
        vocab_file=os.path.join(bert_dir, 'vocab.txt'), do_lower_case=DO_LOWER_CASE)


def predict_online():
    """
    do online prediction. each time make prediction for one instance.
    you can change to a batch if you want.

    Reads sentences from stdin in an endless loop, tokenizes each one, runs
    the restored TF session, and prints the extracted phrases.

    :param line: a list. element is: [dummy_label,text_a,text_b]
    :return:
    """
    def convert(line):
        # Build features for one tokenized sentence and reshape them to the
        # fixed (batch_size, MAX_SEQ_LENGTH) placeholder shape.
        # NOTE(review): reshaping a single MAX_SEQ_LENGTH-long example into
        # (batch_size, MAX_SEQ_LENGTH) with batch_size=8 raises ValueError
        # (element counts differ) — confirm batch_size should be 1 here.
        feature = convert_single_example(0, line, label_list, MAX_SEQ_LENGTH, tokenizer, 'p')
        input_ids = np.reshape([feature.input_ids],(batch_size, MAX_SEQ_LENGTH))
        input_mask = np.reshape([feature.input_mask],(batch_size, MAX_SEQ_LENGTH))
        segment_ids = np.reshape([feature.segment_ids],(batch_size, MAX_SEQ_LENGTH))
        label_ids =np.reshape([feature.label_ids],(batch_size, MAX_SEQ_LENGTH))
        return input_ids, input_mask, segment_ids, label_ids

    global graph
    with graph.as_default():
        print(id2label)
        # Interactive loop: one sentence per stdin line, forever.
        while True:
            print('input the test sentence:')
            sentence = str(input())
            #sentence1 = str(input()).split(' ')
            #print(sentence1)
            start = datetime.now()
            # Skip degenerate inputs shorter than 2 characters.
            if len(sentence) < 2:
                print(sentence)
                continue
            sentence = tokenizer.tokenize(sentence)
            # print('your input is:{}'.format(sentence))
            input_ids, input_mask, segment_ids, label_ids = convert(sentence)

            feed_dict = {input_ids_p: input_ids,
                         input_mask_p: input_mask}
            # run session get current feed_dict result
            # NOTE(review): `pred_ids` is never defined in this module — this
            # line raises NameError as written; confirm the intended fetch op.
            pred_ids_result = sess.run([pred_ids], feed_dict)
            pred_label_result = convert_id_to_label(pred_ids_result, id2label)
            print(pred_label_result)
            #todo: combination strategy for merging predicted spans
            result = strage_combined_link_org_loc(sentence, pred_label_result[0])
            # NOTE(review): `result` is a list (see Result.result_to_json), so
            # string concatenation here would raise TypeError — cf. predict(),
            # which prints it with a comma instead.
            print("研究问题："+result)
            print('time used: {} sec'.format((datetime.now() - start).total_seconds()))

def convert_id_to_label(pred_ids_result, idx2label):
    """
    Translate id-valued prediction rows back into their label-string sequences.

    :param pred_ids_result: per-row containers of predicted label ids
    :param idx2label: mapping from label id to label string
    :return: list (one entry per row, `batch_size` rows) of label-string lists;
             decoding of a row stops at the first 0 id, and the [CLS]/[SEP]
             markers are dropped.
    """
    decoded = []
    for row_idx in range(batch_size):
        labels = []
        for token_id in pred_ids_result[row_idx][0]:
            # 0 is the padding id: everything after it is padding.
            if token_id == 0:
                break
            label = idx2label[token_id]
            # Sentence boundary markers carry no content.
            if label not in ('[CLS]', '[SEP]'):
                labels.append(label)
        decoded.append(labels)
    return decoded



def strage_combined_link_org_loc(tokens, tags):
    """
    Combination strategy: align the tokenized input with its predicted tag
    sequence and extract the tagged word spans.

    Cleanup: the unused inner helper `print_output`, the pointless `params`
    variable, the local name `eval` (which shadowed the builtin), and the
    dead commented-out calls were removed; behavior is unchanged.

    :param tokens: WordPiece tokens of the input sentence
    :param tags: predicted label strings, one per token
    :return: list of extracted phrases (see Result.result_to_json)
    """
    extractor = Result(None)
    # The tag list can be shorter than the token list (e.g. truncated
    # prediction); trim the tokens so both sequences stay aligned.
    if len(tokens) > len(tags):
        tokens = tokens[:len(tags)]
    return extractor.get_result(tokens, tags)


def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, mode):
    """
    Analyze one sample: convert its tokens to ids and its labels to ids, then
    package everything into an InputFeatures object.
    :param ex_index: index of the example (unused here)
    :param example: one sample — already a list of tokens in this file
    :param label_list: iterable of label strings
    :param max_seq_length: fixed output length; shorter sequences are padded
    :param tokenizer: FullTokenizer used for token->id conversion
    :param mode: unused here
    :return: InputFeatures with input_ids/input_mask/segment_ids/label_ids,
             each exactly max_seq_length long
    """
    label_map = {}
    # start label indices at 1 (0 is reserved for padding)
    for (i, label) in enumerate(label_list, 1):
        label_map[label] = i
    # persist the label->index map next to the checkpoint (written only once)
    if not os.path.exists(os.path.join(model_dir, 'label2id.pkl')):
        with codecs.open(os.path.join(model_dir, 'label2id.pkl'), 'wb') as w:
            pickle.dump(label_map, w)

    tokens = example
    # tokens = tokenizer.tokenize(example.text)
    # truncate the sequence
    if len(tokens) >= max_seq_length - 1:
        tokens = tokens[0:(max_seq_length - 2)]  # -2 leaves room for the sentence-start and sentence-end markers
    ntokens = []
    segment_ids = []
    label_ids = []
    ntokens.append("[CLS]")  # sentence-start marker
    segment_ids.append(0)
    # append("O") or append("[CLS]") not sure!
    # NOTE(review): label_map is built from label_list ({"0", "1"} at module
    # level), so label_map["[CLS]"] raises KeyError as called from this file —
    # confirm the intended label set includes [CLS]/[SEP].
    label_ids.append(label_map["[CLS]"])  # O vs CLS makes no real difference, though O would keep the label set smaller; using distinct start/end markers also works fine
    for i, token in enumerate(tokens):
        ntokens.append(token)
        segment_ids.append(0)
        label_ids.append(0)
    ntokens.append("[SEP]")  # sentence-end marker
    segment_ids.append(0)
    # append("O") or append("[SEP]") not sure!
    label_ids.append(label_map["[SEP]"])
    input_ids = tokenizer.convert_tokens_to_ids(ntokens)  # convert the tokens (ntokens) to vocabulary ids
    input_mask = [1] * len(input_ids)

    # pad every list out to max_seq_length with zeros
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        # we don't concerned about it!
        label_ids.append(0)
        ntokens.append("**NULL**")
        # label_mask.append(0)
    # print(len(input_ids))
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(label_ids) == max_seq_length
    # assert len(label_mask) == max_seq_length

    # package into a feature object
    # NOTE(review): InputFeatures is neither defined nor imported in this file —
    # confirm where it is expected to come from.
    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_ids=label_ids,
        # label_mask = label_mask
    )
    return feature


class Pair(object):
    """A tagged text span: the word itself, its start/end offsets, a type tag,
    and a merge flag used when adjacent spans are combined.

    The former private-field + @property/@setter boilerplate added no
    validation or computation, so the fields are now plain public attributes;
    the external interface (readable and writable ``word``, ``start``,
    ``end``, ``merge``, ``types``) and the ``__str__`` output are unchanged.
    """

    def __init__(self, word, start, end, type, merge=False):
        # `type` is kept as the parameter name for caller compatibility,
        # even though it shadows the builtin.
        self.word = word
        self.start = start
        self.end = end
        self.merge = merge
        self.types = type

    def __str__(self) -> str:
        # Tab-separated human-readable dump of all fields.
        parts = [
            'entity:{}'.format(self.word),
            'start:{}'.format(self.start),
            'end:{}'.format(self.end),
            'merge:{}'.format(self.merge),
            'types:{}'.format(self.types),
        ]
        return '\t'.join(parts)


class Result(object):
    """Holds extraction buckets and converts a (tokens, tags) pair into the
    list of tagged phrases."""

    def __init__(self, config):
        self.config = config
        self.rget = []
        self.verb = []
        self.genus = []
        self.rest = []
        self.hyper = []

    def get_result(self, tokens, tags, config=None):
        # Delegate directly to the sequence-to-phrase conversion.
        return self.result_to_json(tokens, tags)

    def result_to_json(self, string, tags):
        """
        Combine the model's tag sequence with the input token sequence.

        WordPiece continuation pieces ('##...') are folded back into the
        preceding token, their 'X' tags are dropped (mutating the caller's
        ``tags`` list in place), and consecutive tokens tagged '9' are joined
        into space-separated phrases.

        :param string: input token sequence
        :param tags: predicted tag sequence (modified in place)
        :return: list of extracted phrases
        """
        print("string:", string)
        print("tags:", tags)
        # Re-align tags with the '##' continuation markers: an 'X' on a
        # non-continuation piece becomes '9', and a continuation piece is
        # forced to 'X'.
        for idx in range(1, len(tags)):
            is_continuation = '##' in string[idx]
            if tags[idx] == 'X' and not is_continuation:
                tags[idx] = '9'
            if is_continuation and tags[idx] != 'X':
                tags[idx] = 'X'

        # Positions of the continuation tags, then strip them from `tags`
        # in place (the caller's list is mutated).
        cont_positions = [idx for idx, tag in enumerate(tags) if tag == 'X']
        for _ in cont_positions:
            tags.remove('X')
        print(tags)

        # Fold each continuation piece (minus its '##' prefix) onto the
        # previous token so tokens and tags line up one-to-one again.
        merged = []
        for idx, piece in enumerate(string):
            if idx > 0 and idx in cont_positions:
                merged[-1] = merged[-1] + piece.replace('##', '')
            else:
                merged.append(piece)
        print(merged)

        print("new_string:", merged)
        print("tags:", tags)

        assert len(merged) == len(tags)

        # Collect maximal runs of '9'-tagged tokens as space-joined phrases.
        phrases = []
        current = ''
        for idx, tag in enumerate(tags):
            if tag == '9':
                current += merged[idx] + ' '
                if idx == len(tags) - 1:
                    phrases.append(current[:-1])
            elif idx > 0 and tags[idx - 1] == '9':
                phrases.append(current[:-1])
                current = ''

        return phrases


        


def predict(text):
    """
    do online prediction. each time make prediction for one instance.
    you can change to a batch if you want.

    Tokenizes `text`, runs the restored TF session, decodes the predicted
    label ids, and returns the extracted phrases.

    :param text: raw input sentence (a string)
    :return: list of extracted phrases (see strage_combined_link_org_loc)
    """
    def convert(line):
        # Build features for one tokenized sentence and reshape them to the
        # fixed (batch_size, MAX_SEQ_LENGTH) placeholder shape.
        # NOTE(review): reshaping a single MAX_SEQ_LENGTH-long example into
        # (batch_size, MAX_SEQ_LENGTH) with batch_size=8 raises ValueError
        # (element counts differ) — confirm batch_size should be 1 here.
        feature = convert_single_example(0, line, label_list, MAX_SEQ_LENGTH, tokenizer, 'p')
        input_ids = np.reshape([feature.input_ids],(batch_size, MAX_SEQ_LENGTH))
        input_mask = np.reshape([feature.input_mask],(batch_size, MAX_SEQ_LENGTH))
        segment_ids = np.reshape([feature.segment_ids],(batch_size, MAX_SEQ_LENGTH))
        label_ids =np.reshape([feature.label_ids],(batch_size, MAX_SEQ_LENGTH))
        return input_ids, input_mask, segment_ids, label_ids

    global graph
    with graph.as_default():
        print(id2label)
        sentence = text
        start = datetime.now()
        sentence = tokenizer.tokenize(sentence)
        print('tokenized sentence:{}'.format(sentence))
        input_ids, input_mask, segment_ids, label_ids = convert(sentence)

        feed_dict = {input_ids_p: input_ids,
                        input_mask_p: input_mask}
        # run session get current feed_dict result
        # NOTE(review): `pred_ids` is never defined in this module — this line
        # raises NameError as written; the graph only defines
        # `probabilities`/`logits`. Confirm the intended fetch op.
        pred_ids_result = sess.run([pred_ids], feed_dict)
        pred_label_result = convert_id_to_label(pred_ids_result, id2label)
        print(pred_label_result)
        #todo: combination strategy for merging predicted spans
        result = strage_combined_link_org_loc(sentence, pred_label_result[0])
        print("研究问题：", result)
        print('time used: {} sec'.format((datetime.now() - start).total_seconds()))
        return result

@app.route('/Question_Word', methods=['GET'])
def Question_Word():
    """GET /Question_Word?data=<text>: run the model over the query text and
    return the extracted phrases as JSON ({"results": [...]}).

    Fix: the original guard (`if not request.args.get("data"): pass`) did
    nothing, so a missing/empty "data" parameter fell through and crashed
    predict() with a 500; an empty result is now returned instead.
    """
    text = request.args.get("data")
    if not text:
        return {'results': []}
    print(text)
    words = predict(text)

    print(words)

    return {'results': words}

if __name__ == "__main__":
    # Serve the prediction endpoint on all interfaces, port 7200.
    app.run('0.0.0.0', port=7200,debug=False)


    # text = "We aimed to clarify the brain region involved in motor function improvement following chronic stroke. "
    # predict(text)

