import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from bert4keras.models import build_transformer_model
from bert4keras.backend import is_tf_keras, set_gelu, keras


# Use the tanh approximation of GeLU globally in bert4keras layers.
set_gelu('tanh')


def textcnn(inputs, kernel_initializer):
    """Three-branch TextCNN over token embeddings.

    Runs three parallel Conv1D branches with kernel sizes 3, 4, 5 (256 filters
    each, 'same' padding, ReLU), global-max-pools each branch, concatenates
    them into a single 768-dim feature vector, and applies dropout.

    Args:
        inputs: token embedding tensor, shape [batch_size, seq_len, emb_dim]
            (here seq_len is maxlen-2 because [CLS]/[SEP] were stripped).
        kernel_initializer: initializer for the conv/dense kernels.

    Returns:
        Tensor of shape [batch_size, 768] (3 branches x 256 filters).
    """
    # Kernel sizes 3, 4, 5 capture n-gram features of different widths.
    cnn1 = keras.layers.Conv1D(
        256,
        3,
        strides=1,
        padding='same',
        activation='relu',
        kernel_initializer=kernel_initializer
    )(inputs)  # shape=[batch_size, seq_len, 256]
    cnn1 = keras.layers.GlobalMaxPooling1D()(cnn1)  # shape=[batch_size, 256]

    cnn2 = keras.layers.Conv1D(
        256,
        4,
        strides=1,
        padding='same',
        activation='relu',
        kernel_initializer=kernel_initializer
    )(inputs)
    cnn2 = keras.layers.GlobalMaxPooling1D()(cnn2)

    # BUGFIX: this branch was missing activation='relu' (the other two
    # branches have it); without it the conv output is linear, which is
    # almost certainly a copy-paste omission rather than intent.
    cnn3 = keras.layers.Conv1D(
        256,
        5,
        strides=1,
        padding='same',
        activation='relu',
        kernel_initializer=kernel_initializer
    )(inputs)
    cnn3 = keras.layers.GlobalMaxPooling1D()(cnn3)

    cnn = keras.layers.concatenate([cnn1, cnn2, cnn3], axis=-1)
    output = keras.layers.Dropout(0.2)(cnn)

    return output



def build_bert_model(config_path, checkpoint_path, class_nums):
    """Build an ALBERT + TextCNN classifier.

    Combines the [CLS] vector with TextCNN features extracted from the
    remaining token embeddings, then classifies via two dense layers.

    Args:
        config_path: path to the ALBERT config file.
        checkpoint_path: path to the pretrained ALBERT checkpoint.
        class_nums: number of output classes.

    Returns:
        A compiled-ready keras.models.Model mapping BERT inputs to
        softmax class probabilities of shape [batch_size, class_nums].
    """
    # Load the pretrained ALBERT model (return the wrapper, not the bare
    # keras model, so we can reuse bert.initializer below).
    bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path,
                                   model='albert', return_keras_model=False)
    # BERT input is [CLS] token1 token2 ... [SEP]; its output is a sequence
    # of hidden vectors. The [CLS] vector (first position) summarizes the
    # whole sentence and could feed a classifier directly.
    cls_features = keras.layers.Lambda(
        lambda x: x[:, 0],
        name='cls_token'
    )(bert.model.output)  # shape = [batch_size, hidden_dim] (768 for base)
    # Drop [CLS] and [SEP] (positions 1 .. -2) to get the embeddings of the
    # actual tokens; this matrix is fed to the TextCNN.
    all_token_embedding = keras.layers.Lambda(
        lambda x: x[:, 1:-1],
        name='all_token'
    )(bert.model.output)  # shape = [batch_size, maxlen-2, hidden_dim]

    textcnn_features = textcnn(all_token_embedding, bert.initializer)
    concat_features = keras.layers.concatenate([cls_features, textcnn_features], axis=-1)
    dense = keras.layers.Dense(
        units=512,
        activation='relu',
        kernel_initializer=bert.initializer
    )(concat_features)

    output = keras.layers.Dense(
        units=class_nums,
        activation='softmax',
        kernel_initializer=bert.initializer
    )(dense)

    model = keras.models.Model(bert.model.input, output)
    # BUGFIX: Model.summary() prints internally and returns None, so
    # print(model.summary()) also printed a spurious "None" line.
    model.summary()
    return model


def seq_padding(X, padding=0):
    """Right-pad each sequence in X with `padding` to the batch max length.

    Args:
        X: iterable of sequences (lists/arrays) of possibly different lengths.
        padding: fill value appended to shorter sequences (default 0).

    Returns:
        np.ndarray of shape [len(X), max_len]; an empty array if X is empty.
    """
    if not X:
        # max() over an empty sequence would raise ValueError.
        return np.array([])
    max_len = max(len(x) for x in X)
    return np.array([
        np.concatenate([x, [padding] * (max_len - len(x))]) if len(x) < max_len else x
        for x in X
    ])


def get_max_len(text_data):
    """Return the max number of space-separated tokens minus one over text_data.

    Each item is stringified and split on single spaces; the per-item count
    is len(split) - 1 (i.e. the number of spaces). Returns 0 for empty input,
    matching the original loop's behavior.

    Args:
        text_data: iterable of items convertible to str.

    Returns:
        int: maximum of len(str(item).split(' ')) - 1, or 0 if empty.
    """
    # Generator + default=0 replaces the index loop and avoids computing
    # the split twice per item.
    return max((len(str(text).split(' ')) - 1 for text in text_data), default=0)


