#!/usr/bin/env python3
# coding:utf-8
"""
---------------------------
    File Name : albert_vector
    Description :
----------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import glob
import os
import numpy as np

import tensorflow as tf
import bert
from bert.tokenization.albert_tokenization import FullTokenizer

def build_transformer(max_seq_length, bert_dir):
    """Build an ALBERT Keras model and its tokenizer from a pretrained checkpoint dir.

    Args:
        max_seq_length: fixed sequence length for all three model inputs.
        bert_dir: directory containing a ``*vocab*.txt`` file, the bert params
            file, and a ``*.index`` checkpoint.

    Returns:
        (model, tokenizer): a tf.keras.Model taking inputs named
        ``input_ids``, ``mask_ids``, ``token_type_ids`` and producing the
        ALBERT sequence output, plus the matching FullTokenizer.
    """
    lower_case = True
    vocab_files = glob.glob(os.path.join(bert_dir, '*vocab*.txt'))
    # Fail early with a clear message instead of a bare IndexError
    # (consistent with the checkpoint guard below).
    assert vocab_files, f'No vocab file found under {bert_dir}'
    tokenizer = FullTokenizer(vocab_file=vocab_files[0], do_lower_case=lower_case)
    bert_params = bert.params_from_pretrained_ckpt(bert_dir)

    l_bert = bert.BertModelLayer.from_params(bert_params, name='albert')
    l_input_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="input_ids")
    l_mask_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="mask_ids")
    l_token_type_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="token_type_ids")
    output = l_bert([l_input_ids, l_token_type_ids], mask=l_mask_ids)
    model = tf.keras.Model(inputs=[l_input_ids, l_mask_ids, l_token_type_ids], outputs=output)
    model.build(input_shape=(None, max_seq_length))

    # Locate the checkpoint via its .index file, then strip the extension to
    # get the checkpoint prefix expected by load_bert_weights.
    ckpt = glob.glob(os.path.join(bert_dir, '*.index'))
    assert ckpt, f'No checkpoint found under {bert_dir}'
    ckpt, _ = os.path.splitext(ckpt[0])
    bert.load_bert_weights(l_bert, ckpt)
    return model, tokenizer


def inputs_to_samples(words, max_seq_length, tokenizer,
                      unk_token='[UNK]',
                      mask_padding_with_zero=True,
                      sequence_a_segment_id=0,
                      pad_token=0,
                      pad_token_segment_id=0
                      ):
    """Tokenize *words* and pad/truncate to fixed-length model inputs.

    Args:
        words: iterable of word strings (e.g. individual characters).
        max_seq_length: fixed output length for every returned array.
        tokenizer: object with ``tokenize(word)`` and
            ``convert_tokens_to_ids(tokens)`` methods.
        unk_token: fallback token used when the tokenizer returns nothing.
        mask_padding_with_zero: if True real tokens get mask 1 and padding 0.
        sequence_a_segment_id: segment id assigned to real tokens.
        pad_token: id used to pad ``input_ids``.
        pad_token_segment_id: segment id used for padding positions.

    Returns:
        Tuple of three numpy arrays ``(input_ids, input_mask, segment_ids)``,
        each with shape ``(1, max_seq_length)``.
    """
    tokens = []
    for word in words:
        word_tokens = tokenizer.tokenize(word)
        if not word_tokens:
            # some weird chars cause the tokenizer to return an empty list
            word_tokens = [unk_token] * len(word)
        tokens.extend(word_tokens)

    # BUG FIX: truncate to max_seq_length. Previously a long input made
    # padding_length negative, silently returning arrays shorter than
    # max_seq_length and breaking the model's fixed input shape.
    tokens = tokens[:max_seq_length]

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
    segment_ids = [sequence_a_segment_id] * len(tokens)

    padding_length = max_seq_length - len(input_ids)

    input_ids += [pad_token] * padding_length
    input_mask += [0 if mask_padding_with_zero else 1] * padding_length
    segment_ids += [pad_token_segment_id] * padding_length

    # Leading batch dimension of 1, matching what model.predict expects.
    return np.array([input_ids]), np.array([input_mask]), np.array([segment_ids])



if __name__ == "__main__":
    bert_dir = "../models/albert_base_zh"
    max_seq_length = 128
    model, tokenizer = build_transformer(max_seq_length, bert_dir)
    sent = list("十九大")
    input_ids, input_mask, segment_ids = inputs_to_samples(sent, max_seq_length, tokenizer)
    print(input_ids.shape)
    feed_dict = {
        'input_ids': input_ids,
        # BUG FIX: was input_ids — the attention mask must be the padding
        # mask, otherwise padded positions are attended to.
        'mask_ids': input_mask,
        'token_type_ids': segment_ids,
    }
    print(model.predict(feed_dict).shape)