Update app.py
app.py
CHANGED
@@ -1,63 +1,18 @@
 import gradio as gr
-from transformers import BertTokenizer, BertForSequenceClassification
-import torch

-
-tokenizer = BertTokenizer.from_pretrained('RAGFillerModel1')
-model = BertForSequenceClassification.from_pretrained('RAGFillerModel1', num_labels=30)

-
-
-labels = [
-    "Hmm, I need to consider that for a moment.",
-    "Let me think about how best to address that.",
-    "Well, I think it really depends on a few factors...",
-    "Good thought! I need a moment to process that.",
-    "You know, I've never really thought about it that way before.",
-    "Okay, let me break that down for a second.",
-    "That's a tough one... give me a second to gather my thoughts.",
-    "I want to make sure I give you the right answer, so let me think.",
-    "Let me reflect on that... there are a few angles to consider.",
-    "Alright, if I remember correctly, it goes something like this...",
-    "That's a good point, and I think the answer is...",
-    "Good question! Let me take a moment to unpack that.",
-    "Hmm, there's a lot to consider here. Give me a second.",
-    "Let me think about that... it's not a straightforward answer.",
-    "Interesting... I need to gather my thoughts on this.",
-    "Well, let me consider the various aspects before I answer.",
-    "Alright, let's break this down a bit before I answer.",
-    "Good thought! I want to make sure I address it properly.",
-    "Hmm, let's delve into that a bit more; I'll need a moment.",
-    "Great question! I want to provide a thoughtful response, so let me think.",
-    "That's a fascinating angle... let me think it through.",
-    "I'll need a moment to come up with an answer.",
-    "I'll take a quick moment to weigh my options.",
-    "I appreciate the question; let me think it through.",
-    "Let me take a step back and think that over.",
-    "Let me mull that over for just a moment.",
-    "I want to consider that carefully; let me pause for a second.",
-    "Let's explore that further; I need a moment to think.",
-    "I'd like to think that over before answering."
-]

-
-def classify_text(text):
-    inputs
-    outputs =

-
-
-    predictions = predictions.cpu().detach().numpy()
-
-    labeled_predictions = {labels[i]: predictions[0][i] for i in range(len(labels))}
-    max_label = labels[predictions[0].argmax()]
-    max_probability = predictions[0].max()
-
-    result = {max_label: max_probability}
-    return result
-
-# Create a Gradio interface
-iface = gr.Interface(fn=classify_text, inputs="text", outputs="label")
-
-# Launch the interface
-iface.launch()
+from transformers import pipeline

+pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

+def predict(input_img):
+    predictions = pipeline(input_img)
+    return input_img, {p["label"]: p["score"] for p in predictions}

+gradio_app = gr.Interface(
+    predict,
+    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+    title="Hot Dog? Or Not?",
+)

+if __name__ == "__main__":
+    gradio_app.launch()
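
Several lines of the removed classify_text body are truncated in the rendering above: the tokenizer call, the model forward pass, and the step that produced the predictions tensor are cut off. The following is a minimal sketch of what that body most likely did, assuming the standard BertForSequenceClassification pattern; the tokenizer arguments and the softmax step are assumptions, not the exact original code, and labels refers to the filler-phrase list defined in the removed file.

import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained('RAGFillerModel1')
model = BertForSequenceClassification.from_pretrained('RAGFillerModel1', num_labels=30)

def classify_text(text):
    # Tokenizer arguments are assumed; the original line is truncated in the diff.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    outputs = model(**inputs)
    # Convert the raw logits to probabilities (assumed step; the original line is lost).
    predictions = torch.softmax(outputs.logits, dim=-1)
    predictions = predictions.cpu().detach().numpy()
    # `labels` is the filler-phrase list from the removed file above.
    max_label = labels[predictions[0].argmax()]
    max_probability = predictions[0].max()
    return {max_label: max_probability}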
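
For a quick check of the new app outside the Gradio UI, the predict function can be called directly on a PIL image, since the image-classification pipeline accepts PIL images and returns a list of label/score dicts that predict reshapes into a single mapping. The file name below is a hypothetical placeholder and the printed scores are only illustrative.

from PIL import Image

# "example.jpg" is a hypothetical local test image, not part of the Space.
img = Image.open("example.jpg")
processed_img, scores = predict(img)
print(scores)  # e.g. {'hot dog': 0.93, 'not hot dog': 0.07} (illustrative values)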