Spaces:
Sleeping
Sleeping
Optimize the code for converting text into tokens
Browse files
app.py
CHANGED
@@ -14,7 +14,7 @@ with open("tokenizer.pckl", "rb") as file:
|
|
14 |
def classify(text: str, response: str):
|
15 |
question = list(tokenizer.texts_to_sequences([text.lower(),])[0])
|
16 |
answer = list(tokenizer.texts_to_sequences([response.lower(),])[0])
|
17 |
-
arr = np.array([
|
18 |
prediction = model.predict(arr)[0][0]
|
19 |
if prediction > 0.9:
|
20 |
return "Surely relevant "+str(prediction)
|
|
|
14 |
def classify(text: str, response: str):
|
15 |
question = list(tokenizer.texts_to_sequences([text.lower(),])[0])
|
16 |
answer = list(tokenizer.texts_to_sequences([response.lower(),])[0])
|
17 |
+
arr = np.array([(question+[0,]*l1)[:l1]+(answer+[0,]*l2)[:l2],])
|
18 |
prediction = model.predict(arr)[0][0]
|
19 |
if prediction > 0.9:
|
20 |
return "Surely relevant "+str(prediction)
|