import gradio as gr
import tensorflow as tf
import numpy as np

# Special characters used during training to mark the recipe title and the end of a recipe.
STOP_WORD_TITLE = '📗 '
STOP_SIGN = '␣'

# Model hyperparameters (must match the values used when the model was trained).
vocab_size = 101
embedding_dim = 256
rnn_units = 1024
batch_size = 1  # we generate one recipe at a time

# Tokenizer setup.
# NOTE: serializing a freshly created Tokenizer and loading it straight back yields an
# empty vocabulary; the config passed to tokenizer_from_json has to be the one saved
# when the model was trained (see the sketch below).
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer_config = tokenizer.to_json()
tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(tokenizer_config)
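# A minimal sketch of loading the real tokenizer, assuming the training run saved its
# Tokenizer config as JSON. The filename 'recipe/tokenizer_config.json' is hypothetical
# and not part of the original Space:
#
# with open('recipe/tokenizer_config.json', 'r') as f:
#     tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(f.read())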
def generate_text(model, start_string, num_generate=1000):
    # Evaluation step (generating text using the learned model).
    padded_start_string = STOP_WORD_TITLE + start_string

    # Converting our start string to numbers (vectorizing).
    input_indices = np.array(tokenizer.texts_to_sequences([padded_start_string]))

    # Empty list to store our results.
    text_generated = []

    # Here batch size == 1.
    model.reset_states()
    for char_index in range(num_generate):
        predictions = model(input_indices)
        # Remove the batch dimension.
        predictions = tf.squeeze(predictions, 0)

        # Using a categorical distribution to predict the character returned by the model.
        predicted_id = tf.random.categorical(
            predictions,
            num_samples=1
        )[-1, 0].numpy()

        # We pass the predicted character as the next input to the model
        # along with the previous hidden state.
        input_indices = tf.expand_dims([predicted_id], 0)

        next_character = tokenizer.sequences_to_texts(input_indices.numpy())[0]
        text_generated.append(next_character)

    return padded_start_string + ''.join(text_generated)
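# Illustrative usage (assumes the model below has been loaded and the tokenizer carries
# the vocabulary from training; the seed string is made up):
#
#   recipe = generate_text(model, start_string='Mushroom soup', num_generate=500)
#   print(recipe)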
def load_model():
    model_path = "recipe/recipe_generation_rnn_raw_1-h5.h5"
    model = tf.keras.models.load_model(model_path)
    return model
def predict(input_data):
    # Load the trained model and generate a recipe from the user's seed text.
    # Padding with STOP_WORD_TITLE and vectorizing happen inside generate_text.
    model = load_model()
    generated_text = generate_text(model, input_data)
    return generated_text
demo = gr.Interface(fn=predict, inputs="text", outputs="text")
demo.launch()
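# The original predict() also referenced a recipe_length value. One possible way to
# expose it in the UI (a sketch under that assumption, not part of the working app
# above) is a second Gradio input:
#
# def predict_with_length(input_data, recipe_length):
#     model = load_model()
#     return generate_text(model, input_data, num_generate=int(recipe_length))
#
# demo = gr.Interface(
#     fn=predict_with_length,
#     inputs=["text", gr.Slider(100, 2000, value=1000, label="Recipe length (characters)")],
#     outputs="text",
# )
# demo.launch()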