rushic24 committed on
Commit
5e0df37
1 Parent(s): 75ade80

Update app.py

Files changed (1)
  1. app.py +166 -0
app.py CHANGED
@@ -0,0 +1,166 @@
+ import gradio as gr
+ import string
+ import re
+ import pickle
+ import numpy as np
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+ from tensorflow.keras.layers import TextVectorization
+
+ # Characters stripped from target text during standardization, saved to strip_chars.pkl ahead of time
+ strip_chars = pickle.load(open('strip_chars.pkl', 'rb'))
+
+ vocab_size = 15000
+ sequence_length = 20
+ batch_size = 64
+
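+ # Custom layers from the trained sequence-to-sequence Transformer. They are re-declared here
+ # so that keras.models.load_model can rebuild the saved model via custom_objects below.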
+ class TransformerEncoder(layers.Layer):
+     def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
+         super(TransformerEncoder, self).__init__(**kwargs)
+         self.embed_dim = embed_dim
+         self.dense_dim = dense_dim
+         self.num_heads = num_heads
+         self.attention = layers.MultiHeadAttention(
+             num_heads=num_heads, key_dim=embed_dim
+         )
+         self.dense_proj = keras.Sequential(
+             [layers.Dense(dense_dim, activation="relu"), layers.Dense(embed_dim)]
+         )
+         self.layernorm_1 = layers.LayerNormalization()
+         self.layernorm_2 = layers.LayerNormalization()
+         self.supports_masking = True
+
+     def call(self, inputs, mask=None):
+         # If no padding mask is provided, let self-attention see every position
+         padding_mask = None
+         if mask is not None:
+             padding_mask = tf.cast(mask[:, tf.newaxis, tf.newaxis, :], dtype="int32")
+         attention_output = self.attention(
+             query=inputs, value=inputs, key=inputs, attention_mask=padding_mask
+         )
+         proj_input = self.layernorm_1(inputs + attention_output)
+         proj_output = self.dense_proj(proj_input)
+         return self.layernorm_2(proj_input + proj_output)
+
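+ # PositionalEmbedding sums a learned token embedding and a learned position embedding,
+ # and marks token id 0 as padding via compute_mask.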
+ class PositionalEmbedding(layers.Layer):
+     def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
+         super(PositionalEmbedding, self).__init__(**kwargs)
+         self.token_embeddings = layers.Embedding(
+             input_dim=vocab_size, output_dim=embed_dim
+         )
+         self.position_embeddings = layers.Embedding(
+             input_dim=sequence_length, output_dim=embed_dim
+         )
+         self.sequence_length = sequence_length
+         self.vocab_size = vocab_size
+         self.embed_dim = embed_dim
+
+     def call(self, inputs):
+         length = tf.shape(inputs)[-1]
+         positions = tf.range(start=0, limit=length, delta=1)
+         embedded_tokens = self.token_embeddings(inputs)
+         embedded_positions = self.position_embeddings(positions)
+         return embedded_tokens + embedded_positions
+
+     def compute_mask(self, inputs, mask=None):
+         return tf.math.not_equal(inputs, 0)
+
+
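+ # TransformerDecoder applies causally masked self-attention over the partial target sequence,
+ # then cross-attention over the encoder outputs, each followed by a residual connection and layer norm.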
+ class TransformerDecoder(layers.Layer):
+     def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):
+         super(TransformerDecoder, self).__init__(**kwargs)
+         self.embed_dim = embed_dim
+         self.latent_dim = latent_dim
+         self.num_heads = num_heads
+         self.attention_1 = layers.MultiHeadAttention(
+             num_heads=num_heads, key_dim=embed_dim
+         )
+         self.attention_2 = layers.MultiHeadAttention(
+             num_heads=num_heads, key_dim=embed_dim
+         )
+         self.dense_proj = keras.Sequential(
+             [layers.Dense(latent_dim, activation="relu"), layers.Dense(embed_dim)]
+         )
+         self.layernorm_1 = layers.LayerNormalization()
+         self.layernorm_2 = layers.LayerNormalization()
+         self.layernorm_3 = layers.LayerNormalization()
+         self.supports_masking = True
+
+     def call(self, inputs, encoder_outputs, mask=None):
+         causal_mask = self.get_causal_attention_mask(inputs)
+         # When no padding mask is provided, leave cross-attention unmasked
+         padding_mask = None
+         if mask is not None:
+             padding_mask = tf.cast(mask[:, tf.newaxis, :], dtype="int32")
+             padding_mask = tf.minimum(padding_mask, causal_mask)
+
+         attention_output_1 = self.attention_1(
+             query=inputs, value=inputs, key=inputs, attention_mask=causal_mask
+         )
+         out_1 = self.layernorm_1(inputs + attention_output_1)
+
+         attention_output_2 = self.attention_2(
+             query=out_1,
+             value=encoder_outputs,
+             key=encoder_outputs,
+             attention_mask=padding_mask,
+         )
+         out_2 = self.layernorm_2(out_1 + attention_output_2)
+
+         proj_output = self.dense_proj(out_2)
+         return self.layernorm_3(out_2 + proj_output)
+
+     def get_causal_attention_mask(self, inputs):
+         # Lower-triangular mask so position i can only attend to positions <= i
+         input_shape = tf.shape(inputs)
+         batch_size, sequence_length = input_shape[0], input_shape[1]
+         i = tf.range(sequence_length)[:, tf.newaxis]
+         j = tf.range(sequence_length)
+         mask = tf.cast(i >= j, dtype="int32")
+         mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
+         mult = tf.concat(
+             [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)],
+             axis=0,
+         )
+         return tf.tile(mask, mult)
+
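+ # Register the custom layers and load the trained Transformer from model.h5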
+ custom_objects = {
+     'TransformerEncoder': TransformerEncoder,
+     'TransformerDecoder': TransformerDecoder,
+     'PositionalEmbedding': PositionalEmbedding,
+ }
+ transformer = keras.models.load_model("model.h5", custom_objects=custom_objects)
+
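+ # Text vectorization: the source vectorizer uses the default standardization, while the target
+ # vectorizer lowercases and strips the characters in strip_chars; its output_sequence_length is
+ # sequence_length + 1 so the decoder input and target can be offset by one token.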
+ def custom_standardization(input_string):
+     lowercase = tf.strings.lower(input_string)
+     return tf.strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars), "")
+
+
+ eng_vectorization = TextVectorization(
+     max_tokens=vocab_size, output_mode="int", output_sequence_length=sequence_length,
+ )
+ spa_vectorization = TextVectorization(
+     max_tokens=vocab_size,
+     output_mode="int",
+     output_sequence_length=sequence_length + 1,
+     standardize=custom_standardization,
+ )
+ # NOTE: train_pairs was not defined in the original file; it is assumed here to be the list of
+ # (source, target) sentence pairs used during training, e.g. unpickled like strip_chars above
+ # (the filename below is a placeholder).
+ train_pairs = pickle.load(open('train_pairs.pkl', 'rb'))
+ train_eng_texts = [pair[0] for pair in train_pairs]
+ train_spa_texts = [pair[1] for pair in train_pairs]
+ eng_vectorization.adapt(train_eng_texts)
+ spa_vectorization.adapt(train_spa_texts)
+
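+ # Gradio widgets for the Space: one source-text input box and one translated-text output box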
+ inputs = gr.inputs.Textbox(lines=1, label="Text in Spanish")
+ outputs = [gr.outputs.Textbox(label="Translated text in Quechua")]
+
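+ # Greedy decoding: start from "[start]", repeatedly feed the partial translation back through
+ # the model, append the most probable next token, and stop once "[end]" is produced.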
+ def get_translate(input_sentence):
+     spa_vocab = spa_vectorization.get_vocabulary()
+     spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
+     max_decoded_sentence_length = 20
+     tokenized_input_sentence = eng_vectorization([input_sentence])
+     decoded_sentence = "[start]"
+     for i in range(max_decoded_sentence_length):
+         tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1]
+         predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])
+
+         sampled_token_index = np.argmax(predictions[0, i, :])
+         sampled_token = spa_index_lookup[sampled_token_index]
+         decoded_sentence += " " + sampled_token
+
+         if sampled_token == "[end]":
+             break
+     return decoded_sentence.replace("[start]", "").replace("[end]", "")
+
+ iface = gr.Interface(fn=get_translate, inputs=inputs, outputs=outputs, title='Spanish to Quechua Translator App')
+
+ iface.launch(debug=True)