pierre-catie committed
Commit 2b41a23
1 Parent(s): ec283a9
Upload 10 files
Files changed:
- README.md +5 -5
- Vaniila(1).png +0 -0
- animos(1).jpeg +0 -0
- app.py +98 -0
- assets_astronaut(1).png +0 -0
- catie(3).png +0 -0
- dej(1).jpeg +0 -0
- rayure(1).jpeg +0 -0
- requirements.txt +8 -0
- voitures.jpg +0 -0
README.md CHANGED
@@ -1,12 +1,12 @@
 ---
 title: Nestor
-emoji:
-colorFrom:
-colorTo:
+emoji: 👁
+colorFrom: red
+colorTo: gray
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.32.1
 app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Vaniila(1).png ADDED
animos(1).jpeg ADDED
app.py ADDED
@@ -0,0 +1,98 @@
+import torch
+import gradio as gr
+from transformers import Owlv2Processor, Owlv2ForObjectDetection, pipeline
+import spaces
+from pathlib import Path
+
+# Use GPU if available
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+else:
+    device = torch.device("cpu")
+
+# OWLv2 open-vocabulary detector, plus a French-to-English translator since
+# the detector's text queries work best in English.
+model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble").to(device)
+processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")
+
+
+@spaces.GPU
+def query_image(img, description, score_threshold):
+    # Translate the French description to English before querying the detector.
+    translation_result = translator(description, src_lang="fr", tgt_lang="en")
+    description = translation_result[0]['translation_text']
+
+    # One detection query per comma-separated phrase.
+    description = [d.strip() for d in description.split(",")]
+
+    size = max(img.shape[:2])
+    target_sizes = torch.Tensor([[size, size]])
+    inputs = processor(text=description, images=img, return_tensors="pt").to(device)
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    outputs.logits = outputs.logits.cpu()
+    outputs.pred_boxes = outputs.pred_boxes.cpu()
+    results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes)
+    boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
+
+    # Keep only detections above the user-chosen score threshold.
+    result_labels = []
+    for box, score, label in zip(boxes, scores, labels):
+        box = [int(i) for i in box.tolist()]
+        if score < score_threshold:
+            continue
+        result_labels.append((box, description[label.item()]))
+    return img, result_labels
+
+
+logo = r"""
+<center><img src='https://doubiiu.github.io/projects/ToonCrafter/static/logo2.png' alt='ToonCrafter logo' style="width:280px; margin-bottom:2px"></center>
+"""
+
+
+# Note: created outside a Blocks context, so this Markdown is never rendered.
+gr.Markdown(logo)
+
+description = """
+
+***logo du catie***.
+
+Détecter des objets en vocabulaire ouvert est une tâche difficile, étant donné la grande variété d'images et de sujets possibles ; cependant, de récents modèles permettent de le faire.
+
+En voici notre adaptation en français.
+Utilisez ce démonstrateur pour requêter n'importe quelle image avec la description textuelle d'un objet.
+
+Pour vous en servir, chargez votre image et entrez des descriptions séparées par des virgules.
+Vous pouvez utiliser le seuil pour filtrer les détections avec une faible probabilité.
+
+
+Si vous avez besoin d'un tel modèle, qui fonctionnera en temps réel, sera spécialisé sur vos propres données et sera de petite taille, donc économe en énergie, contactez-nous : contact@catie.fr
+
+"""
+#image_path="./Vaniila.png"
+#absolute_path = Path(image_path).resolve()
+
+#gr.Image('Vaniila.png',
+#         height=500, width=176,
+#         show_download_button=False)
+
+
+demo = gr.Interface(
+    query_image,
+    inputs=[gr.Image(), "text", gr.Slider(0, 1, value=0.1)],
+    outputs="annotatedimage",
+    title="Détection d'objets en vocabulaire ouvert.",
+    description=description,
+    #css=".gradio-container {background: url('file=Vaniila.png')}",
+    examples=[
+        ["voitures.jpg", "voiture orange, roues", 0.21],
+        ["animos.jpeg", "lion, chimpanzé", 0.37],
+        ["dej.jpeg", "jambon, assiette, croissant", 0.3],
+    ],
+)
+
+
+demo.launch()
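For reference, here is a minimal standalone sketch of the flow app.py wires into Gradio: translate a French query to English, run OWLv2 on one image, then post-process into labelled boxes. The local image path and query string are illustrative assumptions, not files guaranteed by this commit; the post-processing follows the transformers documentation for OWLv2.

import torch
from PIL import Image
from transformers import Owlv2Processor, Owlv2ForObjectDetection, pipeline

# Same checkpoints as the app.
processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")

# Illustrative inputs: any local image and a comma-separated French query.
image = Image.open("voitures.jpg")
text = translator("voiture orange, roues")[0]["translation_text"]
queries = [q.strip() for q in text.split(",")]

inputs = processor(text=[queries], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Map predictions back to pixel coordinates and drop low-scoring boxes.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=0.1)
for box, score, label in zip(results[0]["boxes"], results[0]["scores"], results[0]["labels"]):
    print(queries[label.item()], round(score.item(), 3), [round(v, 1) for v in box.tolist()])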
assets_astronaut(1).png ADDED
catie(3).png ADDED
dej(1).jpeg ADDED
rayure(1).jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,8 @@
+numpy>=1.18.5
+torch>=1.7.0
+torchvision>=0.8.1
+git+https://github.com/huggingface/transformers.git
+scipy
+spaces
+sentencepiece
+pathlib
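One caveat on that last pin: pathlib has shipped with the Python standard library since 3.4, and the PyPI package of the same name is an unmaintained Python 2 backport, so it can likely be dropped.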
voitures.jpg ADDED