update-gradio (#3)
- Update licence (128831be86625cf07c820ec3746883d7ead110b3)
- Update requirements (480bc123d88871d3641d3ca177ff3d66d93a609c)
- Update interaction code (04dd7ac440b8da8386a4d36c08b23e7af4dd296e)
LICENCE
CHANGED
@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c)
+Copyright (c) 2024 Alessandro Conti

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: π
 colorFrom: green
 colorTo: yellow
 sdk: gradio
-sdk_version: '4.
+sdk_version: '4.44.1'
 python_version: '3.9'
 app_file: app.py
 pinned: false
app.py
CHANGED
@@ -43,7 +43,7 @@ MODEL = AutoModel.from_pretrained("altndrr/cased", trust_remote_code=True).to(DE
 PROCESSOR = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")


-def save_original_image(image: gr.Image):
+def image_preprocess(image: gr.Image):
     if image is None:
         return None, None

@@ -51,13 +51,6 @@ def save_original_image(image: gr.Image):
     size = min(size) if isinstance(size, tuple) else size
     image = resize(image, size)

-    return image, image.copy()
-
-
-def prepare_image(image: gr.Image):
-    if image is None:
-        return None, None
-
     PROCESSOR.image_processor.do_normalize = False
     image_tensor = PROCESSOR(images=[image], return_tensors="pt", padding=True)
     PROCESSOR.image_processor.do_normalize = True
@@ -87,9 +80,8 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
     gr.Markdown(MARKDOWN_DESCRIPTION)
     with gr.Row():
         with gr.Column():
-            curr_image = gr.Image(
-
-                label="orig. image", type="pil", visible=False, interactive=False
+            curr_image = gr.Image(
+                label="input", type="pil", sources=["upload", "webcam", "clipboard"]
             )
             alpha_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="alpha")
             with gr.Row():
@@ -97,9 +89,13 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
                 run_button = gr.Button(value="Submit", variant="primary")
         with gr.Column():
             output_label = gr.Label(label="output", num_top_classes=5)
+
+    _orig_image = gr.Image(label="original image", type="pil", visible=False, interactive=False)
+    _example_image = gr.Image(label="example image", type="pil", visible=False, interactive=False)
+
     examples = gr.Examples(
         examples=glob(os.path.join(os.path.dirname(__file__), "examples", "*.jpg")),
-        inputs=[
+        inputs=[_example_image],
         outputs=[output_label],
         fn=image_inference,
         cache_examples=True,
@@ -108,10 +104,10 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:

     # INTERACTIONS
     # - change
-
+    _example_image.change(image_preprocess, [_example_image], [curr_image, _orig_image])

     # - upload
-    curr_image.upload(
+    curr_image.upload(image_preprocess, [curr_image], [curr_image, _orig_image])
     curr_image.upload(lambda: None, [], [output_label])

     # - clear
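To make the new wiring easier to follow, here is a minimal, self-contained sketch of the pattern the app.py change introduces: a single image_preprocess function feeds both the visible input image and a hidden copy of the original, while a hidden example image lets cached examples and direct uploads share the same preprocessing path. The function bodies below are placeholders (the real Space resizes the image, toggles the CLIP processor's normalization, and runs the CaSED model); only the component and event layout mirrors the diff.

# Minimal sketch of the interaction wiring above (placeholder logic, not the Space's real code).
import gradio as gr


def image_preprocess(image):
    # Stand-in for the real preprocessing (resize + CLIP processor round-trip).
    if image is None:
        return None, None
    # Return the preview shown to the user and an untouched copy of the original.
    return image, image.copy()


def image_inference(image):
    # Stand-in for the CaSED model call; returns label -> confidence for gr.Label.
    if image is None:
        return None
    return {"example class": 1.0}


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            curr_image = gr.Image(
                label="input", type="pil", sources=["upload", "webcam", "clipboard"]
            )
            run_button = gr.Button("Submit", variant="primary")
        with gr.Column():
            output_label = gr.Label(label="output", num_top_classes=5)

    # Hidden helper components: the untouched original and a slot for examples.
    _orig_image = gr.Image(label="original image", type="pil", visible=False, interactive=False)
    _example_image = gr.Image(label="example image", type="pil", visible=False, interactive=False)

    # In the real app, gr.Examples writes into the hidden example slot; its .change()
    # event then runs the same preprocessing as a manual upload, keeping both paths in sync.
    _example_image.change(image_preprocess, [_example_image], [curr_image, _orig_image])
    curr_image.upload(image_preprocess, [curr_image], [curr_image, _orig_image])
    # A second upload handler clears any stale prediction from the output label.
    curr_image.upload(lambda: None, [], [output_label])

    run_button.click(image_inference, [curr_image], [output_label])


if __name__ == "__main__":
    demo.launch()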
requirements.txt
CHANGED
@@ -1,8 +1,9 @@
-torch
-torchvision
-faiss-cpu
-flair
-gradio==4.
-inflect
-nltk
-
+torch>=2,<3
+torchvision>=0,<1
+faiss-cpu>=1,<2
+flair>=0,<1
+gradio==4.44.1
+inflect>=7,<8
+nltk>=3,<4
+pyarrow>=18,<19
+transformers>=4,<5
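As an optional sanity check that a local environment matches the new pins, something like the following sketch can be run. It assumes the packaging library is importable (a common transitive dependency of pip-based setups); the pin list is copied from the updated requirements.txt and should be adjusted if the pins change.

# Optional sanity check: verify installed package versions satisfy the new pins.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

PINS = [
    "torch>=2,<3",
    "torchvision>=0,<1",
    "faiss-cpu>=1,<2",
    "flair>=0,<1",
    "gradio==4.44.1",
    "inflect>=7,<8",
    "nltk>=3,<4",
    "pyarrow>=18,<19",
    "transformers>=4,<5",
]

for pin in PINS:
    req = Requirement(pin)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        print(f"{req.name}: not installed")
        continue
    ok = req.specifier.contains(installed, prereleases=True)
    print(f"{req.name} {installed}: {'ok' if ok else f'outside {req.specifier}'}")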