Commit b7e2f3c

Duplicate from Panel-Org/panel-template

Co-authored-by: Sophia Yang <sophiamyang@users.noreply.huggingface.co>

Files changed:
- .gitattributes +34 -0
- Dockerfile +16 -0
- README.md +10 -0
- app.py +147 -0
- requirements.txt +6 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,16 @@
+FROM python:3.11
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+RUN python3 -m pip install --no-cache-dir --upgrade pip
+RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . .
+
+CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "*"]
+
+RUN mkdir /.cache
+RUN chmod 777 /.cache
+RUN mkdir .chroma
+RUN chmod 777 .chroma
README.md
ADDED
@@ -0,0 +1,10 @@
+---
+title: Panel Template
+emoji: π
+colorFrom: gray
+colorTo: green
+sdk: docker
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,147 @@
+import io
+import random
+from typing import List, Tuple
+
+import aiohttp
+import panel as pn
+from PIL import Image
+from transformers import CLIPModel, CLIPProcessor
+
+pn.extension(design="bootstrap", sizing_mode="stretch_width")
+
+ICON_URLS = {
+    "brand-github": "https://github.com/holoviz/panel",
+    "brand-twitter": "https://twitter.com/Panel_Org",
+    "brand-linkedin": "https://www.linkedin.com/company/panel-org",
+    "message-circle": "https://discourse.holoviz.org/",
+    "brand-discord": "https://discord.gg/AXRHnJU6sP",
+}
+
+
+async def random_url(_):
+    pet = random.choice(["cat", "dog"])
+    api_url = f"https://api.the{pet}api.com/v1/images/search"
+    async with aiohttp.ClientSession() as session:
+        async with session.get(api_url) as resp:
+            return (await resp.json())[0]["url"]
+
+
+@pn.cache
+def load_processor_model(
+    processor_name: str, model_name: str
+) -> Tuple[CLIPProcessor, CLIPModel]:
+    processor = CLIPProcessor.from_pretrained(processor_name)
+    model = CLIPModel.from_pretrained(model_name)
+    return processor, model
+
+
+async def open_image_url(image_url: str) -> Image:
+    async with aiohttp.ClientSession() as session:
+        async with session.get(image_url) as resp:
+            return Image.open(io.BytesIO(await resp.read()))
+
+
+def get_similarity_scores(class_items: List[str], image: Image) -> List[float]:
+    processor, model = load_processor_model(
+        "openai/clip-vit-base-patch32", "openai/clip-vit-base-patch32"
+    )
+    inputs = processor(
+        text=class_items,
+        images=[image],
+        return_tensors="pt",  # pytorch tensors
+    )
+    outputs = model(**inputs)
+    logits_per_image = outputs.logits_per_image
+    class_likelihoods = logits_per_image.softmax(dim=1).detach().numpy()
+    return class_likelihoods[0]
+
+
+async def process_inputs(class_names: List[str], image_url: str):
+    """
+    High level function that takes in the user inputs and returns the
+    classification results as panel objects.
+    """
+    try:
+        main.disabled = True
+        if not image_url:
+            yield "##### ⚠️ Provide an image URL"
+            return
+
+        yield "##### ⚙ Fetching image and running model..."
+        try:
+            pil_img = await open_image_url(image_url)
+            img = pn.pane.Image(pil_img, height=400, align="center")
+        except Exception as e:
+            yield f"##### 😔 Something went wrong, please try a different URL!"
+            return
+
+        class_items = class_names.split(",")
+        class_likelihoods = get_similarity_scores(class_items, pil_img)
+
+        # build the results column
+        results = pn.Column("##### 🎉 Here are the results!", img)
+
+        for class_item, class_likelihood in zip(class_items, class_likelihoods):
+            row_label = pn.widgets.StaticText(
+                name=class_item.strip(), value=f"{class_likelihood:.2%}", align="center"
+            )
+            row_bar = pn.indicators.Progress(
+                value=int(class_likelihood * 100),
+                sizing_mode="stretch_width",
+                bar_color="secondary",
+                margin=(0, 10),
+                design=pn.theme.Material,
+            )
+            results.append(pn.Column(row_label, row_bar))
+        yield results
+    finally:
+        main.disabled = False
+
+
+# create widgets
+randomize_url = pn.widgets.Button(name="Randomize URL", align="end")
+
+image_url = pn.widgets.TextInput(
+    name="Image URL to classify",
+    value=pn.bind(random_url, randomize_url),
+)
+class_names = pn.widgets.TextInput(
+    name="Comma separated class names",
+    placeholder="Enter possible class names, e.g. cat, dog",
+    value="cat, dog, parrot",
+)
+
+input_widgets = pn.Column(
+    "##### 😊 Click randomize or paste a URL to start classifying!",
+    pn.Row(image_url, randomize_url),
+    class_names,
+)
+
+# add interactivity
+interactive_result = pn.panel(
+    pn.bind(process_inputs, image_url=image_url, class_names=class_names),
+    height=600,
+)
+
+# add footer
+footer_row = pn.Row(pn.Spacer(), align="center")
+for icon, url in ICON_URLS.items():
+    href_button = pn.widgets.Button(icon=icon, width=35, height=35)
+    href_button.js_on_click(code=f"window.open('{url}')")
+    footer_row.append(href_button)
+footer_row.append(pn.Spacer())
+
+# create dashboard
+main = pn.WidgetBox(
+    input_widgets,
+    interactive_result,
+    footer_row,
+)
+
+title = "Panel Demo - Image Classification"
+pn.template.BootstrapTemplate(
+    title=title,
+    main=main,
+    main_max_width="min(50%, 698px)",
+    header_background="#F08080",
+).servable(title=title)
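For reference, app.py performs zero-shot image classification with CLIP: the comma-separated class names and the fetched image are encoded by the same openai/clip-vit-base-patch32 checkpoint, and logits_per_image is softmaxed into per-class likelihoods. Below is a minimal standalone sketch of that pattern, illustrative only and not part of the commit; the local file example.jpg is a hypothetical stand-in for the image fetched from a URL in the app.

import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Same checkpoint that app.py loads for both the processor and the model.
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")

labels = ["cat", "dog", "parrot"]  # candidate class names, matching the app's default
image = Image.open("example.jpg")  # hypothetical local image

inputs = processor(text=labels, images=[image], return_tensors="pt")
with torch.no_grad():
    logits_per_image = model(**inputs).logits_per_image  # shape (1, len(labels))
probs = logits_per_image.softmax(dim=1)[0]

for label, prob in zip(labels, probs):
    print(f"{label}: {prob.item():.2%}")

The printed percentages correspond to what app.py renders as a StaticText label plus a Progress bar for each class.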
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+panel
+jupyter
+transformers
+numpy
+torch
+aiohttp