Create app.py
app.py
ADDED
@@ -0,0 +1,52 @@
import gradio as gr
import numpy as np
from todset import todset
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Flatten
from keras.preprocessing.text import Tokenizer
from keras_self_attention import SeqSelfAttention  # from the keras-self-attention package

# Model hyperparameters (assumed values; not fixed by the snippet above).
emb_size = 128  # embedding dimension
inp_len = 16    # maximum number of tokens per question

def train(data: str, message: str):
    if "→" not in data and "\n" not in data:
        return "Dataset should be like:\nquestion→answer\nquestion→answer\netc."
    dset, responses = todset(data)
    dset_size = len(responses)

    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(list(dset.keys()))

    vocab_size = len(tokenizer.word_index) + 1

    model = Sequential()
    model.add(Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len))
    model.add(SeqSelfAttention())
    model.add(Flatten())
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation="relu"))
    model.add(Dense(512, activation="relu"))
    model.add(Dense(256, activation="relu"))
    model.add(Dense(dset_size, activation="softmax"))

    # Build the training set: padded token sequences and one-hot response targets.
    X = []
    y = []

    for key in dset:
        tokens = tokenizer.texts_to_sequences([key])[0]
        X.append(np.array((list(tokens) + [0] * inp_len)[:inp_len]))
        output_array = np.zeros(dset_size)
        output_array[dset[key]] = 1
        y.append(output_array)

    X = np.array(X)
    y = np.array(y)

    model.compile(loss="categorical_crossentropy", metrics=["accuracy"])

    model.fit(X, y, epochs=10, batch_size=8, workers=4, use_multiprocessing=True)

    # Tokenize the incoming message, pad it to inp_len, and return the most likely response.
    tokens = tokenizer.texts_to_sequences([message])[0]
    prediction = model.predict(np.array([(list(tokens) + [0] * inp_len)[:inp_len]]))[0]
    max_o = 0
    max_v = 0
    for ind, i in enumerate(prediction):
        if max_v < i:
            max_v = i
            max_o = ind
    return responses[max_o]

iface = gr.Interface(fn=train, inputs=["text", "text"], outputs="text")
iface.launch()
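The local todset helper imported above is not part of this commit. A minimal sketch of what such a helper could look like, assuming it only splits the pasted dataset into "question→answer" lines and returns a dict mapping each question to a response index plus the list of responses (the behaviour here is an assumption, not the Space's actual module):

# Hypothetical sketch of a todset-style parser (assumed behaviour).
def todset(text: str):
    dset = {}
    responses = []
    for line in text.split("\n"):
        if "→" not in line:
            continue
        question, answer = line.split("→", 1)
        question = question.strip()
        answer = answer.strip()
        if answer not in responses:
            responses.append(answer)
        dset[question] = responses.index(answer)
    return dset, responses

Any helper with this shape would satisfy how app.py uses it: dset keys feed the tokenizer, dset values index into responses, and len(responses) sets the softmax output size.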