from transformers import TFBertModel
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, LSTM, Bidirectional  # type: ignore
from tensorflow.keras.models import Model  # type: ignore
from tensorflow.keras.optimizers import Adam  # type: ignore
from tensorflow.keras.regularizers import l1_l2  # type: ignore


def _head_metrics(auc_name: str) -> list:
    """Build a fresh metric list for one output head.

    Keras metric objects are stateful, so each head must get its own
    instances — they cannot be shared between outputs.
    """
    return [
        'accuracy',
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name=auc_name, multi_label=True),
        tf.keras.metrics.F1Score(name='f1_score'),  # type: ignore
    ]


def build_model(max_length=65, layer=40, dropout=0.69, l2_lstm=0.01,
                learning_rate=1e-4) -> Model:
    """Build and compile a BERT + BiLSTM multitask classifier.

    A shared `bert-base-cased` encoder feeds two independent
    Bidirectional-LSTM branches: a 6-class emotion head and a 7-class
    toxicity head, each with its own softmax output.

    Args:
        max_length: Token-sequence length of the `input_ids` input.
        layer: LSTM units per direction in each BiLSTM branch.
        dropout: Dropout rate applied inside each LSTM.
        l2_lstm: Base L2 kernel-regularization factor; the L1 factor is
            derived from it (0.15x for the emotion branch, 0.2x for the
            toxicity branch — TODO confirm these ratios are intentional).
        learning_rate: Adam learning rate.

    Returns:
        A compiled `tf.keras.Model` with outputs
        `[emotion_output, toxicity_output]`, both trained with
        categorical cross-entropy.
    """
    # NOTE: from_pretrained downloads weights on first call (network I/O).
    bert = TFBertModel.from_pretrained('bert-base-cased')

    input_ids = Input(shape=(max_length,), dtype=tf.int32, name='input_ids')
    # [0] is the last_hidden_state: (batch, max_length, hidden_size).
    bert_output = bert(input_ids)[0]  # type: ignore

    # Two task-specific BiLSTM branches over the shared BERT encoding.
    bi_lstm_emotion = Bidirectional(
        LSTM(layer, dropout=dropout,
             kernel_regularizer=l1_l2(l2_lstm * 0.15, l2_lstm)))(bert_output)
    bi_lstm_toxicity = Bidirectional(
        LSTM(layer, dropout=dropout,
             kernel_regularizer=l1_l2(l2_lstm * 0.2, l2_lstm)))(bert_output)

    # Classification heads.
    output_emotion = Dense(6, activation='softmax',
                           name='emotion_output')(bi_lstm_emotion)
    output_toxicity = Dense(7, activation='softmax',
                            name='toxicity_output')(bi_lstm_toxicity)

    model = Model(inputs=input_ids,
                  outputs=[output_emotion, output_toxicity])

    model.compile(
        optimizer=Adam(learning_rate=learning_rate),
        loss={'emotion_output': 'categorical_crossentropy',
              'toxicity_output': 'categorical_crossentropy'},
        metrics={
            'emotion_output': _head_metrics('em_auc'),
            'toxicity_output': _head_metrics('to_auc'),
        },
    )
    return model