import tensorflow as tf
tf1 = tf.compat.v1
import json
import transformers
from tqdm import tqdm
import time


def read_data(file_name, max_sen_len=30):
    """Load a joint intent/slot dataset and encode it for training.

    Parameters
    ----------
    file_name : str
        Path to a JSON file holding a list of items, each with keys
        'utterance' (string), 'intents' (list of intent labels) and
        'slots' (list of per-token slot labels).
    max_sen_len : int
        Utterances and slot sequences are truncated/padded to this length.

    Returns
    -------
    tuple
        (utterances, intents, slots, intent_dict, intent_dict_id,
         ner_dict, ner_dict_id): BERT token-id sequences, multi-hot intent
        vectors, padded slot-id sequences, and the label<->id mappings.
    """
    # Fix: `json.load(open(file_name))` leaked the file handle; use a
    # context manager and explicit encoding.
    with open(file_name, encoding='utf-8') as f:
        data_list = json.load(f)
    intent_dict, intent_dict_id = {}, {}
    # Slot id 0 is reserved for the '[PAD]' tag.
    ner_dict, ner_dict_id = {'[PAD]': 0}, {0: '[PAD]'}
    utterances, intents, slots = [], [], []
    tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-chinese')
    intent_i, slot_i = 0, 1
    for item in data_list:
        # Pad/truncate the utterance to max_sen_len tokens, then map to ids.
        tokens = ['[PAD]'] * max_sen_len
        for i, word in enumerate(item['utterance'][:max_sen_len]):
            tokens[i] = word
        utterances.append(tokenizer.convert_tokens_to_ids(tokens))
        # Assign a stable integer id to each intent label on first sight.
        sent_intents = []
        for intent in item['intents']:
            if intent not in intent_dict:
                intent_dict[intent] = intent_i
                intent_dict_id[intent_i] = intent
                intent_i += 1
            sent_intents.append(intent_dict[intent])
        intents.append(sent_intents)
        # Same first-seen id assignment for slot (NER) labels.
        sent_slots = [0] * max_sen_len
        for i, slot in enumerate(item['slots'][:max_sen_len]):
            if slot not in ner_dict:
                ner_dict[slot] = slot_i
                ner_dict_id[slot_i] = slot
                slot_i += 1
            sent_slots[i] = ner_dict[slot]
        slots.append(sent_slots)
    # Convert per-sentence intent-id lists into multi-hot vectors of
    # length |intent_dict| (a sentence may carry several intents).
    intent_len = len(intent_dict)
    new_intents = []
    for sent_intents in intents:
        multi_hot = [0] * intent_len
        for num in sent_intents:
            multi_hot[num] = 1
        new_intents.append(multi_hot)
    intents = new_intents
    return utterances, intents, slots, intent_dict, intent_dict_id, \
           ner_dict, ner_dict_id


class Model:
    """Joint intent-classification / NER model on top of BERT (TF1 graph mode).

    The whole graph is built in the constructor. Callers feed `word_ids`,
    `intents_ids` and `ner_ids` and fetch `cost`, `optimizer`,
    `intent_accuracy` and `ner_accuracy` from a session.
    """

    def __init__(self, sen_len, intent_len, ner_len, hidden_len, learning_rate):
        # NOTE(review): `hidden_len` is accepted but never used — kept for
        # interface compatibility with existing callers.
        # Token ids per sentence.
        self.word_ids = tf1.placeholder(shape=[None, sen_len], dtype=tf.int32)
        # Multi-hot intent vector: one 0/1 entry per known intent.
        self.intents_ids = tf1.placeholder(shape=[None, intent_len], dtype=tf.int32)
        # Per-token NER (slot) label ids.
        self.ner_ids = tf1.placeholder(shape=[None, sen_len], dtype=tf.int32)
        # Fix: the original loaded checkpoint 'git', which does not match the
        # 'bert-base-chinese' tokenizer used in read_data; load the same model.
        bert_model = transformers.TFBertModel.from_pretrained('bert-base-chinese')
        # embedded: per-token vectors (batch, sen_len, hidden); cls: pooled
        # sentence vector. NOTE(review): tuple unpacking assumes an older
        # transformers version whose model call returns a tuple — confirm.
        embedded, cls = bert_model(self.word_ids)
        # One independent 2-way (present/absent) classifier per intent,
        # stacked to shape (batch, intent_len, 2).
        logits = tf1.stack(
            [tf1.layers.Dense(2, activation='relu')(cls) for _ in range(intent_len)],
            axis=1)
        # Cross-entropy of each intent's 0/1 label against its 2-way logits.
        y_real = tf1.one_hot(self.intents_ids, 2, axis=2)
        log_likelihood = tf1.nn.softmax_cross_entropy_with_logits(
            labels=y_real, logits=logits, dim=2)
        # Mean intent loss.
        cost1 = tf1.reduce_mean(log_likelihood)
        # Shared dense layer mapping every token embedding to NER-tag logits.
        # Dense applies to the last axis of a 3-D input, so the original
        # per-position Python loop + stack is unnecessary (same computation).
        ner_logits = tf1.layers.Dense(ner_len, activation='relu')(embedded)
        # One-hot the true tags and take the per-token cross-entropy.
        ner_real = tf1.one_hot(self.ner_ids, ner_len, axis=2)
        log_likelihood = tf1.nn.softmax_cross_entropy_with_logits(
            labels=ner_real, logits=ner_logits, dim=2)
        cost2 = tf1.reduce_mean(log_likelihood)
        # Total loss = intent loss + NER loss.
        self.cost = cost1 + cost2
        self.optimizer = tf1.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(self.cost)
        # Fix: tf1.arg_max is deprecated and returns int64, while the label
        # placeholders are int32 — tf.equal requires matching dtypes, so the
        # original comparison would fail. Request int32 output explicitly.
        intent_pred = tf1.argmax(logits, axis=-1, output_type=tf.int32)
        self.intent_accuracy = tf1.reduce_mean(
            tf1.cast(tf1.equal(intent_pred, self.intents_ids), tf.float32))
        ner_pred = tf1.argmax(ner_logits, axis=-1, output_type=tf.int32)
        self.ner_accuracy = tf1.reduce_mean(
            tf1.cast(tf1.equal(ner_pred, self.ner_ids), tf.float32))



def main():
    """Train the joint intent/NER model on the prepared dataset."""
    sen_len = 50
    utterances, intents, slots, intent_dict, intent_dict_id, \
    ner_dict, ner_dict_id = read_data('readable_data/joint_train_data.json', sen_len)

    epoch = 3
    batch_size = 32
    intent_len = len(intent_dict)
    ner_len = len(ner_dict)
    hidden_len = 768
    learning_rate = 1e-3

    tf1.reset_default_graph()
    with tf1.Session() as sess:
        with tf.device('/device:CPU:0'):
            model = Model(sen_len, intent_len, ner_len, hidden_len, learning_rate)
            sess.run(tf1.global_variables_initializer())

            def train():
                # One pass over the data per epoch, in mini-batches.
                for e in range(epoch):
                    pbar = tqdm(range(0, len(utterances), batch_size),
                                desc='train minibatch loop')
                    for i in pbar:
                        # Python slicing clamps at the sequence end, so the
                        # original explicit min() for the last batch was
                        # redundant.
                        batch_X = utterances[i:i + batch_size]
                        batch_intent = intents[i:i + batch_size]
                        batch_slot = slots[i:i + batch_size]
                        cost, _, acc1, acc2 = sess.run(
                            [model.cost, model.optimizer,
                             model.intent_accuracy, model.ner_accuracy],
                            feed_dict={model.word_ids: batch_X,
                                       model.intents_ids: batch_intent,
                                       model.ner_ids: batch_slot})
                        # Fix: the fetched metrics were previously discarded
                        # (all reporting lines were commented out); surface
                        # them on the progress bar.
                        pbar.set_postfix(cost=cost, intent_acc=acc1,
                                         ner_acc=acc2)
            train()


# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
