import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Toy problem dimensions: route customer tickets by priority and department.
num_tags = 12          # size of the binary tag vector attached to each ticket
num_words = 10000      # vocabulary size shared by both text inputs
num_departments = 4    # number of departments a ticket can be routed to

# Two variable-length integer token sequences plus one fixed-size tag vector.
title_input = keras.Input(shape=(None,), name='title')
body_input = keras.Input(shape=(None,), name='body')
tags_input = keras.Input(shape=(num_tags,), name='tags')


# --- Text feature extraction --------------------------------------------
# Embed each token of the title and body into 64-dimensional vectors,
# then let an LSTM summarize each sequence into one fixed-size vector.
embedded_title = layers.Embedding(num_words, 64)(title_input)
embedded_body = layers.Embedding(num_words, 64)(body_input)

encoded_title = layers.LSTM(128)(embedded_title)  # 128-d title summary
encoded_body = layers.LSTM(32)(embedded_body)     # 32-d body summary

# Merge both text encodings with the raw tag vector into one feature vector.
features = layers.concatenate([encoded_title, encoded_body, tags_input])

# --- Prediction heads ---------------------------------------------------
# Priority: a single sigmoid unit (logistic regression on the features).
priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(features)
# Department: softmax classifier over the possible departments.
department_pred = layers.Dense(num_departments, activation='softmax',
                               name='department')(features)


# Instantiate an end-to-end model predicting both priority and department.
model = keras.Model(
    inputs=[title_input, body_input, tags_input],
    outputs=[priority_pred, department_pred],
)

# NOTE(review): plot_model requires the optional pydot + graphviz packages;
# it raises ImportError if they are missing.
keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)

# Key losses and weights by output-layer name instead of by position:
# positional lists silently pair losses with the wrong heads if the
# `outputs=[...]` order above ever changes. binary_crossentropy matches the
# sigmoid priority head; categorical_crossentropy matches the softmax
# department head (expects one-hot targets).
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={
        'priority': 'binary_crossentropy',
        'department': 'categorical_crossentropy',
    },
    loss_weights={'priority': 1.0, 'department': 0.2},
)
