Yangtao Wang committed
Commit f555962
1 Parent(s): 3501709

TokenCut demo

Files changed (4)
  1. README.md +1 -1
  2. app.py +20 -37
  3. packages.txt +4 -0
  4. requirements.txt +14 -0
README.md CHANGED
@@ -9,4 +9,4 @@ app_file: app.py
 pinned: false
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+This is the TokenCut demo; the original demo is from https://huggingface.co/spaces/akhaliq/TokenCut. Thanks to Ahsen Khaliq for the nice contribution.
app.py CHANGED
@@ -1,39 +1,22 @@
-import requests
-import pandas as pd
+import os
 import gradio as gr
-from huggingface_hub.hf_api import SpaceInfo
+from pathlib import Path


-
-path = f"https://huggingface.co/api/spaces"
-
-
-
-def get_blocks_party_spaces():
-    r = requests.get(path)
-    d = r.json()
-    spaces = [SpaceInfo(**x) for x in d]
-    blocks_spaces = {}
-    for i in range(0, len(spaces)):
-        if spaces[i].id.split('/')[0] == 'CVPR' and hasattr(spaces[i], 'likes') and spaces[i].id != 'CVPR/Leaderboard' and spaces[i].id != 'CVPR/README':
-            blocks_spaces[spaces[i].id] = spaces[i].likes
-    df = pd.DataFrame(
-        [{"Spaces_Name": Spaces, "likes": likes} for Spaces, likes in blocks_spaces.items()])
-    df = df.sort_values(by=['likes'], ascending=False)
-    return df
-
-
-block = gr.Blocks()
-
-with block:
-    gr.Markdown("""Leaderboard for the most popular CVPR Spaces. To learn more and join, see <a href="https://huggingface.co/CVPR" target="_blank" style="text-decoration: underline">CVPR Event</a>""")
-    with gr.Tabs():
-        with gr.TabItem("CVPR Leaderboard"):
-            with gr.Row():
-                data = gr.outputs.Dataframe(type="pandas")
-            with gr.Row():
-                data_run = gr.Button("Refresh")
-            data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)
-
-block.load(get_blocks_party_spaces, inputs=None, outputs=data)
-block.launch()
+os.system("git clone https://github.com/YangtaoWANG95/TokenCut.git")
+os.chdir("TokenCut")
+os.system("wget https://raw.githubusercontent.com/YangtaoWANG95/TokenCut/master/examples/VOC07_000064.jpg -O parrot.jpg")
+
+def inference(img):
+    os.system("python main_tokencut.py --image_path " + img + " --visualize all --resize 480")
+    filename = Path(img).stem
+    return "./outputs/TokenCut-vit_small16_k/" + filename + "_TokenCut_attn.jpg", "./outputs/TokenCut-vit_small16_k/" + filename + "_TokenCut_pred.jpg"
+
+title = "TokenCut"
+description = "Gradio demo for TokenCut: Self-Supervised Transformers for Unsupervised Object Discovery using Normalized Cut. To use it, simply upload your image or click on one of the examples to load them. We resize the smaller edge of the image to 480 to accelerate inference time. Read more at the links below."
+
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2202.11539' target='_blank'>Self-Supervised Transformers for Unsupervised Object Discovery using Normalized Cut</a> | <a href='https://github.com/YangtaoWANG95/TokenCut' target='_blank'>Github Repo</a></p>"
+
+examples = [['parrot.jpg']]
+gr.Interface(inference, gr.inputs.Image(type="filepath"), [gr.outputs.Image(type="file", label="TokenCut_attn"), gr.outputs.Image(type="file", label="TokenCut_prediction")], title=title, description=description, article=article, examples=examples).launch(enable_queue=True)
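For reference, a minimal sketch of what the new inference() helper does when called outside the Gradio interface. The command line and the outputs/TokenCut-vit_small16_k/ path are taken from the diff above; the run_tokencut name and the __main__ smoke test are illustrative only and assume the TokenCut repo has already been cloned and is the working directory.

import os
from pathlib import Path

def run_tokencut(image_path: str):
    # Run the TokenCut script exactly as app.py does: resize the smaller
    # edge to 480 and save all visualizations.
    os.system("python main_tokencut.py --image_path " + image_path + " --visualize all --resize 480")
    stem = Path(image_path).stem
    out_dir = Path("./outputs/TokenCut-vit_small16_k")
    # app.py returns these two files to the Gradio interface.
    return out_dir / (stem + "_TokenCut_attn.jpg"), out_dir / (stem + "_TokenCut_pred.jpg")

if __name__ == "__main__":
    attn, pred = run_tokencut("parrot.jpg")
    print("attention map:", attn, "exists:", attn.exists())
    print("prediction:   ", pred, "exists:", pred.exists())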
packages.txt ADDED
@@ -0,0 +1,4 @@
+ffmpeg
+libsm6
+libxext6
+
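These apt packages are installed at Space build time. The likely reason, not stated in the commit, is that libsm6 and libxext6 are shared libraries opencv-python needs in a headless container, and ffmpeg covers video decoding. A small, hypothetical sanity check one could run inside the Space:

import shutil
import cv2  # an ImportError mentioning libSM or libXext usually means the apt packages above are missing

print("OpenCV version:", cv2.__version__)
print("ffmpeg on PATH:", shutil.which("ffmpeg"))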
requirements.txt ADDED
@@ -0,0 +1,14 @@
+torch
+torchvision
+scipy>=1.5.0
+matplotlib>=3.2.2
+opencv-python>=4.1.2
+tqdm>=4.41.0
+scikit-image
+catalyst
+scikit-learn
+pycocotools
+matplotlib
+pandas
+timm==0.3.2
+tensorboard
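A quick, hypothetical check that the environment resolved as listed; the package names come from the file above, and the only hard pin is timm==0.3.2:

from importlib.metadata import version, PackageNotFoundError

for pkg in ["torch", "torchvision", "timm", "opencv-python", "scikit-image", "scikit-learn"]:
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not installed")

assert version("timm") == "0.3.2", "the demo pins timm to 0.3.2"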