Update app.py
app.py CHANGED
@@ -2,7 +2,6 @@ import streamlit as st
 import torch
 import torch.nn as nn
 from transformers import PreTrainedModel, PretrainedConfig, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
-from huggingface_hub import login
 import os
 import time
 
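The commit's main change is dropping the `huggingface_hub` login step. A minimal sketch of why this works, assuming the checkpoints involved are public Hub repos (the per-call `token=` variant in the comment is hypothetical, for the private-repo case only):

```python
from transformers import AutoTokenizer

# Public Hub repos download anonymously; no login() call is required.
tokenizer = AutoTokenizer.from_pretrained("AssistantsLab/Tiny-Toxic-Detector")

# Hypothetical private-repo variant: pass a token per call instead of a
# process-wide login, e.g. from_pretrained(repo_id, token=os.environ["HF_TOKEN"]).
```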
@@ -65,17 +64,16 @@ class TinyTransformerForSequenceClassification(PreTrainedModel):
 
 # Load models and tokenizers
 @st.cache_resource
-def load_models_and_tokenizers(hf_token):
-    login(token=hf_token)
+def load_models_and_tokenizers():
     device = torch.device("cpu")  # forcing CPU as overhead of inference on GPU slows down the inference
 
     models = {}
     tokenizers = {}
 
     # Load Tiny-toxic-detector
-    config = TinyTransformerConfig.from_pretrained("AssistantsLab/Tiny-Toxic-Detector"
-    models["Tiny-toxic-detector"] = TinyTransformerForSequenceClassification.from_pretrained("AssistantsLab/Tiny-Toxic-Detector", config=config
-    tokenizers["Tiny-toxic-detector"] = AutoTokenizer.from_pretrained("AssistantsLab/Tiny-Toxic-Detector"
+    config = TinyTransformerConfig.from_pretrained("AssistantsLab/Tiny-Toxic-Detector")
+    models["Tiny-toxic-detector"] = TinyTransformerForSequenceClassification.from_pretrained("AssistantsLab/Tiny-Toxic-Detector", config=config).to(device)
+    tokenizers["Tiny-toxic-detector"] = AutoTokenizer.from_pretrained("AssistantsLab/Tiny-Toxic-Detector")
 
     # Load other models
     model_configs = [
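For context, `@st.cache_resource` is what keeps this loader cheap: Streamlit reruns the whole script on every interaction, but a cached resource function executes once per process and later calls return the same objects. A self-contained sketch of the pattern (the `load_model` name and `repo_id` argument are illustrative, not from app.py):

```python
import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

@st.cache_resource  # body runs once per process; reruns reuse the result
def load_model(repo_id: str):
    device = torch.device("cpu")  # small model: CPU avoids GPU transfer overhead
    model = AutoModelForSequenceClassification.from_pretrained(repo_id).to(device)
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    return model, tokenizer, device
```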
@@ -85,8 +83,8 @@ def load_models_and_tokenizers(hf_token):
     ]
 
     for model_name, model_class, tokenizer_name in model_configs:
-        models[model_name] = model_class.from_pretrained(model_name
-        tokenizers[model_name] = AutoTokenizer.from_pretrained(tokenizer_name
+        models[model_name] = model_class.from_pretrained(model_name).to(device)
+        tokenizers[model_name] = AutoTokenizer.from_pretrained(tokenizer_name)
 
     return models, tokenizers, device
 
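This loop relies on `model_configs` holding `(model_name, model_class, tokenizer_name)` tuples; the actual entries sit above this hunk and are not part of the diff. A hypothetical, self-contained version of the pattern (the repo id is a placeholder, not a real checkpoint):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = torch.device("cpu")
models, tokenizers = {}, {}

# Placeholder entry; the real list in app.py is outside the diff context.
model_configs = [
    ("some-org/toxicity-model", AutoModelForSequenceClassification,
     "some-org/toxicity-model"),
]

for model_name, model_class, tokenizer_name in model_configs:
    models[model_name] = model_class.from_pretrained(model_name).to(device)
    tokenizers[model_name] = AutoTokenizer.from_pretrained(tokenizer_name)
```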
@@ -142,8 +140,7 @@ def main():
     """)
 
     # Load models
-
-    models, tokenizers, device = load_models_and_tokenizers(hf_token)
+    models, tokenizers, device = load_models_and_tokenizers()
 
     # Reorder the models dictionary so that "Tiny-toxic-detector" is last
     model_names = sorted(models.keys(), key=lambda x: x == "Tiny-toxic-detector")
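One side effect worth noting, as general `st.cache_resource` behavior rather than anything specific to this commit: the cache is keyed on the function's arguments, so the new zero-argument call always maps to a single cached model set. A toy sketch:

```python
import streamlit as st

@st.cache_resource
def expensive_load():
    return {"loaded": True}  # heavy work would happen here, once per process

a = expensive_load()
b = expensive_load()  # cache hit: the exact same object comes back
assert a is b
```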
@@ -177,4 +174,4 @@ def main():
         st.warning("Please enter some text to classify.")
 
 if __name__ == "__main__":
-    main()
+    main()