import gradio as gr

import numpy as np
import tensorflow as tf

# Marker that was prepended to every recipe title during training.
STOP_WORD_TITLE = '📕'
# Padding character used by the training pipeline.
STOP_SIGN = '␣'

vocab_size = 101
embedding_dim = 256
rnn_units = 1024
batch_size = 1  # Since we're generating one recipe at a time
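
# For reference, a sketch of the network these hyperparameters describe,
# following the standard character-level text-generation recipe
# (Embedding -> stateful LSTM -> Dense logits). This is an assumption for
# illustration only: the actual architecture is whatever was serialized into
# the .h5 checkpoint loaded in load_model() below.
def build_model_sketch(vocab_size, embedding_dim, rnn_units, batch_size):
    return tf.keras.Sequential([
        tf.keras.layers.Embedding(
            vocab_size,
            embedding_dim,
            batch_input_shape=[batch_size, None],  # stateful RNNs need a fixed batch size
        ),
        tf.keras.layers.LSTM(
            rnn_units,
            return_sequences=True,
            stateful=True,
        ),
        tf.keras.layers.Dense(vocab_size),  # one logit per character in the vocabulary
    ])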


# Tokenizer setup: the character-to-index mapping must match the one learned
# during training, so restore the tokenizer from its saved JSON config rather
# than round-tripping a fresh, unfitted Tokenizer (which would map everything
# to empty sequences). 'tokenizer_config.json' is an assumed filename for
# that saved config.
with open('tokenizer_config.json') as f:
    tokenizer_config = f.read()
tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(tokenizer_config)
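# With a char-level tokenizer, texts_to_sequences maps each character to its
# integer index; illustrative only (these are not the real vocabulary ids):
#   tokenizer.texts_to_sequences(['📕Apple'])  ->  [[1, 34, 17, 17, 20, 2]]
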
def generate_text(model, start_string, num_generate=1000):
    # Generate `num_generate` characters from the trained model, one at a
    # time, seeded with the given start string.

    padded_start_string = STOP_WORD_TITLE + start_string

    # Converting our start string to numbers (vectorizing).
    input_indices = np.array(tokenizer.texts_to_sequences([padded_start_string]))

    # List to accumulate the generated characters.
    text_generated = []

    # Batch size is 1 here; reset the stateful RNN's hidden state so this
    # generation does not inherit state from a previous call.
    model.reset_states()
    for char_index in range(num_generate):
        predictions = model(input_indices)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)

        # Using a categorical distribution to predict the character returned by the model.
        predicted_id = tf.random.categorical(
            predictions,
            num_samples=1
        )[-1,0].numpy()

        # We pass the predicted character as the next input to the model
        # along with the previous hidden state.
        input_indices = tf.expand_dims([predicted_id], 0)

        next_character = tokenizer.sequences_to_texts(input_indices.numpy())[0]

        text_generated.append(next_character)

    return padded_start_string + ''.join(text_generated)

def load_model():
    # Load the trained character-level RNN from its saved HDF5 checkpoint.
    model_path = "recipe/recipe_generation_rnn_raw_1-h5.h5"
    return tf.keras.models.load_model(model_path)
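
# Example usage (assuming the restored tokenizer matches the trained model):
#
#   model = load_model()
#   print(generate_text(model, 'Mushroom soup', num_generate=300))
#
# At each step, tf.random.categorical treats the model's last-step logits as
# an unnormalized log-probability distribution over the vocabulary and samples
# one character id, so repeated runs yield varied recipes rather than a single
# argmax continuation.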


def predict(input_data):
    # Gradio handler: takes the starting text from the UI textbox and returns
    # the generated recipe as a plain string (generate_text prepends the
    # STOP_WORD_TITLE marker itself, and Gradio handles serialization, so no
    # Flask-style request/jsonify plumbing is needed).
    model = load_model()  # Reloaded per call for simplicity; could be cached at module level.
    return generate_text(model, input_data)


# Wire the handler into a minimal Gradio UI: one textbox in, generated text out.
demo = gr.Interface(fn=predict, inputs="text", outputs="text")
demo.launch()