altndrr committed
Commit aeb9633
1 Parent(s): a2424b0
Files changed (4)
  1. LICENCE +1 -1
  2. README.md +1 -1
  3. app.py +10 -14
  4. requirements.txt +9 -8
LICENCE CHANGED
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2023 Alessandro Conti
+Copyright (c) 2024 Alessandro Conti
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌍
 colorFrom: green
 colorTo: yellow
 sdk: gradio
-sdk_version: '4.36.0'
+sdk_version: '4.44.1'
 python_version: '3.9'
 app_file: app.py
 pinned: false
app.py CHANGED
@@ -43,7 +43,7 @@ MODEL = AutoModel.from_pretrained("altndrr/cased", trust_remote_code=True).to(DE
 PROCESSOR = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
 
 
-def save_original_image(image: gr.Image):
+def image_preprocess(image: gr.Image):
     if image is None:
         return None, None
 
@@ -51,13 +51,6 @@ def save_original_image(image: gr.Image):
     size = min(size) if isinstance(size, tuple) else size
     image = resize(image, size)
 
-    return image, image.copy()
-
-
-def prepare_image(image: gr.Image):
-    if image is None:
-        return None, None
-
     PROCESSOR.image_processor.do_normalize = False
     image_tensor = PROCESSOR(images=[image], return_tensors="pt", padding=True)
     PROCESSOR.image_processor.do_normalize = True
@@ -87,9 +80,8 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
     gr.Markdown(MARKDOWN_DESCRIPTION)
     with gr.Row():
         with gr.Column():
-            curr_image = gr.Image(label="input", type="pil")
-            _orig_image = gr.Image(
-                label="orig. image", type="pil", visible=False, interactive=False
+            curr_image = gr.Image(
+                label="input", type="pil", sources=["upload", "webcam", "clipboard"]
             )
             alpha_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="alpha")
             with gr.Row():
@@ -97,9 +89,13 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
                 run_button = gr.Button(value="Submit", variant="primary")
         with gr.Column():
             output_label = gr.Label(label="output", num_top_classes=5)
+
+    _orig_image = gr.Image(label="original image", type="pil", visible=False, interactive=False)
+    _example_image = gr.Image(label="example image", type="pil", visible=False, interactive=False)
+
     examples = gr.Examples(
         examples=glob(os.path.join(os.path.dirname(__file__), "examples", "*.jpg")),
-        inputs=[_orig_image],
+        inputs=[_example_image],
         outputs=[output_label],
         fn=image_inference,
         cache_examples=True,
@@ -108,10 +104,10 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
 
     # INTERACTIONS
     # - change
-    _orig_image.change(prepare_image, [_orig_image], [curr_image, _orig_image])
+    _example_image.change(image_preprocess, [_example_image], [curr_image, _orig_image])
 
     # - upload
-    curr_image.upload(save_original_image, [curr_image], [curr_image, _orig_image])
+    curr_image.upload(image_preprocess, [curr_image], [curr_image, _orig_image])
     curr_image.upload(lambda: None, [], [output_label])
 
     # - clear
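Aside from merging save_original_image and prepare_image into a single image_preprocess, the visible API change in these hunks is the sources argument on gr.Image, which in Gradio 4.x limits how users can hand an image to the component. A minimal standalone sketch (hypothetical, not from the repo; assumes Gradio 4.x is installed):

import gradio as gr

# Mirrors the commit's `curr_image` definition: the component accepts a file
# upload, a webcam capture, or a clipboard paste, and nothing else.
with gr.Blocks() as demo:
    curr_image = gr.Image(
        label="input", type="pil", sources=["upload", "webcam", "clipboard"]
    )

if __name__ == "__main__":
    demo.launch()

Example images bypass these sources entirely: the commit routes them through the hidden _example_image component, whose change event feeds image_preprocess.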
requirements.txt CHANGED
@@ -1,8 +1,9 @@
-torch==2.0.1
-torchvision==0.15.2
-faiss-cpu==1.7.4
-flair==0.13.0
-gradio==4.7.1
-inflect==7.0.0
-nltk==3.8.1
-transformers==4.35.1
+torch>=2,<3
+torchvision>=0,<1
+faiss-cpu>=1,<2
+flair>=0,<1
+gradio==4.44.1
+inflect>=7,<8
+nltk>=3,<4
+pyarrow>=18,<19
+transformers>=4,<5
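The exact pins are relaxed to major-version ranges, while gradio stays pinned at 4.44.1 to match the sdk_version bump in README.md. The new specifiers can be sanity-checked with the packaging library; a small sketch (assumes packaging is installed, e.g. via pip install packaging):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

# The new `torch` requirement: any 2.x release, but not 3.x.
spec = SpecifierSet(">=2,<3")

print(Version("2.0.1") in spec)  # True: the previously pinned release still qualifies
print(Version("3.0.0") in spec)  # False: the next major version is excluded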