Update index.html
index.html  CHANGED  (+52 -20)
@@ -20,38 +20,70 @@

Before (old lines 20-57):

 <body>
 	<gradio-lite>
 		<gradio-file name="app.py" entrypoint>
 import gradio as gr
 
-from filters import as_gray
 
-
-
-
 
 demo = gr.Interface(
-
-	"
-
-
 )
 
 demo.launch()
 		</gradio-file>
 
-		<gradio-file name="filters.py">
-from skimage.color import rgb2gray
-
-def as_gray(image):
-	return rgb2gray(image)
-		</gradio-file>
-
-		<gradio-file name="lion.jpg" url="https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/test_data/lion.jpg" />
-		<gradio-file name="logo.png" url="https://raw.githubusercontent.com/gradio-app/gradio/main/guides/assets/logo.png" />
 
 		<gradio-requirements>
-
-scikit-image
 		</gradio-requirements>
 	</gradio-lite>
 </body>
-</html>
After (new lines 20-89):

 <body>
 	<gradio-lite>
 		<gradio-file name="app.py" entrypoint>
+from transformers_js import import_transformers_js, as_url
 import gradio as gr
 
 
+transformers = await import_transformers_js()
+AutoProcessor = transformers.AutoProcessor
+AutoModel = transformers.AutoModel
+RawImage = transformers.RawImage
+
+processor = await AutoProcessor.from_pretrained('Xenova/yolov9-c');
+# TODO: Resize the input image
+
+model = await AutoModel.from_pretrained('Xenova/yolov9-c');
+
+
+async def detect(image_path):
+    image = await RawImage.read(image_path)
+
+    processed_input = await processor(image)
+
+    # Predict bounding boxes
+    result = await model(images=processed_input["pixel_values"]);
+
+    outputs = result["outputs"]  # Tensor
+    np_outputs = outputs.numpy()  # [xmin, ymin, xmax, ymax, score, id][]
+    gradio_labels = [
+        # List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]
+        (
+            (
+                int(xmin),
+                int(ymin),
+                int(xmax),
+                int(ymax),
+            ),
+            model.config.id2label[str(int(id))],
+        )
+        for xmin, ymin, xmax, ymax, score, id in np_outputs
+    ]
+
+    annotated_image_data = image_path, gradio_labels
+    return annotated_image_data, np_outputs
 
 demo = gr.Interface(
+    detect,
+    gr.Image(type="filepath"),
+    [
+        gr.AnnotatedImage(),
+        gr.JSON(),
+    ],
+    examples=[
+        ["cats.jpg"],
+        ["city-streets.jpg"],
+    ]
 )
 
 demo.launch()
 		</gradio-file>
 
+		<gradio-file name="cats.jpg" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg" />
+		<gradio-file name="city-streets.jpg" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg" />
 
 		<gradio-requirements>
+transformers_js_py
 		</gradio-requirements>
 	</gradio-lite>
 </body>
+</html>
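The added app.py leaves a "# TODO: Resize the input image" comment before the processor call. Purely as an illustrative sketch, not part of this commit, one way that TODO could be handled is to downscale the uploaded file before RawImage.read() sees it. This assumes Pillow is available in the browser runtime (it would need to be added to <gradio-requirements>), and the helper name and size cap below are hypothetical:

# Hypothetical helper, not in the diff: shrink the uploaded image in place
# before detection. Assumes Pillow is listed in <gradio-requirements>.
from PIL import Image

MAX_SIDE = 640  # hypothetical cap on the longest side

def shrink_in_place(image_path):
    img = Image.open(image_path)
    img.thumbnail((MAX_SIDE, MAX_SIDE))  # preserves aspect ratio, only shrinks
    img.save(image_path)
    return image_path

detect() would then start with something like image = await RawImage.read(shrink_in_place(image_path)).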
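The hunk starts at line 20, so the <head> of this index.html is not shown. For orientation only, a minimal Gradio-Lite page skeleton looks roughly like the following; the exact script and stylesheet tags are assumptions based on the standard Gradio-Lite setup, not lines from this file:

<!DOCTYPE html>
<html>
<head>
	<!-- Assumed standard Gradio-Lite loader; the real <head> is outside this diff -->
	<script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
	<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
</head>
<body>
	<gradio-lite>
		<!-- <gradio-file> and <gradio-requirements> blocks as in the "After" listing above -->
	</gradio-lite>
</body>
</html>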