yuragoithf committed
Commit c899fa2
Parent: ff2d498

Update app.py

Files changed (1)
  1. app.py +9 -54
app.py CHANGED
@@ -1,54 +1,9 @@
- import gradio as gr
- from PIL import Image
- from transformers import AutoFeatureExtractor, AutoModelForImageSegmentation
- import tensorflow as tf
-
- extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic")
- model = AutoModelForImageSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic")
-
-
- # Perform image classification for single class output
- # def predict_class(image):
- #     img = tf.cast(image, tf.float32)
- #     img = tf.image.resize(img, [input_shape[0], input_shape[1]])
- #     img = tf.expand_dims(img, axis=0)
- #     prediction = model.predict(img)
- #     class_index = tf.argmax(prediction[0]).numpy()
- #     predicted_class = labels[class_index]
- #     return predicted_class
-
- # Perform image classification for multy class output
- def predict_class(image):
-     img = tf.cast(image, tf.float32)
-     prediction = model.predict(img)
-     return prediction
-
- # UI Design for single class output
- # def classify_image(image):
- #     predicted_class = predict_class(image)
- #     output = f"<h2>Predicted Class: <span style='text-transform:uppercase';>{predicted_class}</span></h2>"
- #     return output
-
-
- # UI Design for multy class output
- def classify_image(image):
-     results = predict_class(image)
-
-     return results
-
-
- inputs = gr.inputs.Image(type="pil", label="Upload an image")
- # outputs = gr.outputs.HTML() #uncomment for single class output
- outputs = gr.outputs.Label(num_top_classes=4)
-
- title = "<h1 style='text-align: center;'>Image Classifier</h1>"
- description = "Upload an image and get the predicted class."
- # css_code='body{background-image:url("file=wave.mp4");}'
-
- gr.Interface(fn=classify_image,
-              inputs=inputs,
-              outputs=outputs,
-              title=title,
-              examples=[["00_plane.jpg"], ["01_car.jpg"], ["02_bird.jpg"], ["03_cat.jpg"], ["04_deer.jpg"]],
-              # css=css_code,
-              description=description).launch()
+ import json
+ import requests
+ API_URL = "https://api-inference.huggingface.co/models/gpt2"
+ headers = {"Authorization": f"Bearer {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}"}
+ def query(payload):
+     data = json.dumps(payload)
+     response = requests.request("POST", API_URL, headers=headers, data=data)
+     return json.loads(response.content.decode("utf-8"))
+ data = query("Can you please let us know more details about your ")
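As committed, the new app.py writes the organization token directly into the Authorization header f-string. That both publishes the secret in the repository and treats `api_org_…` as a Python variable; since nothing in the committed file defines that name, building `headers` raises a NameError at import time. Below is a minimal sketch of the same Inference API call with the token read from an environment variable instead; the variable name `HF_API_TOKEN` is an assumption, not something this Space defines.

import json
import os

import requests

# Assumption: the token is supplied as a Space secret / environment variable
# named HF_API_TOKEN instead of being hard-coded in app.py.
API_URL = "https://api-inference.huggingface.co/models/gpt2"
headers = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

def query(payload):
    # Serialize the payload and POST it to the Inference API endpoint.
    data = json.dumps(payload)
    response = requests.request("POST", API_URL, headers=headers, data=data)
    return json.loads(response.content.decode("utf-8"))

data = query("Can you please let us know more details about your ")

On Hugging Face Spaces such a value would normally be stored as a repository secret, which is exposed to the running app as an environment variable and never appears in the committed source.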