vluz committed on
Commit f11b351
1 Parent(s): d8d7a49

Upload app.py

Files changed (1)
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ import os
+ import pickle
+ import streamlit as st
+ import tensorflow as tf
+ from tensorflow.keras.layers import TextVectorization
+
+
+ @st.cache_resource
+ def load_model():
+     # Load the trained toxicity classifier once and cache it across Streamlit reruns
+     model = tf.keras.models.load_model(os.path.join("model", "toxmodel.keras"))
+     return model
+
+
+ @st.cache_resource
+ def load_vectorizer():
+     # Rebuild the TextVectorization layer from its pickled config and weights
+     with open(os.path.join("model", "vectorizer.pkl"), "rb") as f:
+         from_disk = pickle.load(f)
+     new_v = TextVectorization.from_config(from_disk['config'])
+     # adapt() on dummy data builds the layer so set_weights() can be applied
+     # (workaround for a Keras bug with deserialized TextVectorization layers)
+     new_v.adapt(tf.data.Dataset.from_tensor_slices(["xyz"]))
+     new_v.set_weights(from_disk['weights'])
+     return new_v
+
+
+ @st.cache_resource
+ def load_vocab():
+     # Read the token -> index mapping from a tab-separated vocab file
+     vocab = {}
+     with open('vocab.txt', 'r') as f:
+         for line in f:
+             token, index = line.strip().split('\t')
+             vocab[token] = int(index)
+     return vocab
+
+
+ st.title("Toxic Comment Test")
+ st.divider()
+ model = load_model()
+ vectorizer = load_vectorizer()
+ input_text = st.text_area("Comment:", "I love you man, but fuck you!", height=150)
+ if st.button("Test"):
+     with st.spinner("Testing..."):
+         inputv = vectorizer([input_text])
+         output = model.predict(inputv)
+         res = (output > 0.5)  # threshold each label probability at 0.5
+         st.write(["toxic", "severe toxic", "obscene", "threat", "insult", "identity hate"], res)
+         st.write(output)
+         print(output)
+
+
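Note: load_vectorizer() above assumes model/vectorizer.pkl is a pickled dict with 'config' and 'weights' keys. A minimal sketch of how such a file could be produced; the max_tokens, output_sequence_length, and train_texts values are hypothetical placeholders, not taken from this repo:

# Sketch only: writes vectorizer.pkl in the format load_vectorizer() expects.
# max_tokens, output_sequence_length, and train_texts are placeholder assumptions.
import os
import pickle
import tensorflow as tf
from tensorflow.keras.layers import TextVectorization

train_texts = ["an example comment", "another example comment"]  # placeholder corpus
vectorizer = TextVectorization(max_tokens=200000,
                               output_sequence_length=1800,
                               output_mode="int")
vectorizer.adapt(tf.data.Dataset.from_tensor_slices(train_texts))

os.makedirs("model", exist_ok=True)
with open(os.path.join("model", "vectorizer.pkl"), "wb") as f:
    pickle.dump({"config": vectorizer.get_config(),
                 "weights": vectorizer.get_weights()}, f)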