arxivgpt kim committed on
Commit
815368c
•
1 Parent(s): 63e5794

Update app.py

Files changed (1)
1. app.py +18 -134
app.py CHANGED
@@ -4,62 +4,14 @@ import torch.nn.functional as F
  from torchvision.transforms.functional import normalize
  from huggingface_hub import hf_hub_download
  import gradio as gr
- from gradio_imageslider import ImageSlider
- from briarmbg import BriaRMBG
- import PIL
  from PIL import Image
- from typing import Tuple

- import os
- import requests
- from moviepy.editor import VideoFileClip
- from moviepy.audio.AudioClip import AudioClip
-
- def search_pexels_images(query):
-     API_KEY = os.getenv("API_KEY")
-     url = f"https://api.pexels.com/v1/search?query={query}&per_page=80"
-     headers = {"Authorization": API_KEY}
-     response = requests.get(url, headers=headers)
-     data = response.json()
-
-     # Build a list containing only high-resolution image URLs
-     images_urls = []
-     for photo in data.get('photos', []):
-         # If a 'large2x' resolution image is provided, use that URL
-         if 'src' in photo and 'large2x' in photo['src']:
-             images_urls.append(photo['src']['large2x'])
-         # If no 'large2x' image is available, fall back to 'large' or 'original'
-         elif 'large' in photo['src']:
-             images_urls.append(photo['src']['large'])
-         elif 'original' in photo['src']:
-             images_urls.append(photo['src']['original'])
-
-     return images_urls
-
-
- def show_search_results(query):
-     images_urls = search_pexels_images(query)
-     return images_urls
-
-
- net=BriaRMBG()
- # model_path = "./model1.pth"
- model_path = hf_hub_download("briaai/RMBG-1.4", 'model.pth')
- if torch.cuda.is_available():
-     net.load_state_dict(torch.load(model_path))
-     net=net.cuda()
- else:
-     net.load_state_dict(torch.load(model_path, map_location="cpu"))
- net.eval()
-
-
  def resize_image(image):
      image = image.convert('RGB')
      model_input_size = (1024, 1024)
      image = image.resize(model_input_size, Image.BILINEAR)
      return image

-
  def process(image):
      # Convert to a PIL.Image object only if the input is a numpy array
      if isinstance(image, np.ndarray):
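
Review note: the removed search_pexels_images helper read its key from the API_KEY environment variable but never checked the HTTP status code. A defensive sketch of the same call (the Pexels /v1/search endpoint and the large2x/large/original fallback chain come from the removed code; the params form, timeout, and error handling are assumptions):

    import os
    import requests

    def search_pexels_images(query, per_page=80):
        resp = requests.get(
            "https://api.pexels.com/v1/search",
            params={"query": query, "per_page": per_page},  # equivalent to the original f-string URL
            headers={"Authorization": os.getenv("API_KEY")},
            timeout=10,                                     # assumption: fail fast on a slow API
        )
        resp.raise_for_status()  # surface 4xx/5xx instead of returning junk
        # Same resolution preference as the removed code: large2x, then large, then original
        return [p["src"].get("large2x") or p["src"].get("large") or p["src"].get("original")
                for p in resp.json().get("photos", []) if "src" in p]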
@@ -78,7 +30,16 @@ def process(image):
      if torch.cuda.is_available():
          im_tensor = im_tensor.cuda()

-     # inference
+     # Model loading and prediction
+     net = BriaRMBG()
+     model_path = hf_hub_download("briaai/RMBG-1.4", 'model.pth')
+     if torch.cuda.is_available():
+         net.load_state_dict(torch.load(model_path))
+         net = net.cuda()
+     else:
+         net.load_state_dict(torch.load(model_path, map_location="cpu"))
+     net.eval()
+
      result = net(im_tensor)
      # post process
      result = torch.squeeze(F.interpolate(result[0][0], size=(h, w), mode='bilinear'), 0)
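
Review note: this hunk moves model construction and the weight download inside process(), so BriaRMBG is re-instantiated and its weights re-read on every button click; note also that hunk 1 removed the `from briarmbg import BriaRMBG` import, which the new body still needs. A minimal sketch that loads the network once and reuses it (same BriaRMBG/hf_hub_download calls as the diff; the lazy-singleton wrapper is an assumption, not part of this commit):

    import torch
    from huggingface_hub import hf_hub_download
    from briarmbg import BriaRMBG  # re-add the import dropped in hunk 1

    _net = None

    def get_net():
        # Build the network and load weights only on the first call,
        # then reuse the cached instance for every later request.
        global _net
        if _net is None:
            net = BriaRMBG()
            model_path = hf_hub_download("briaai/RMBG-1.4", "model.pth")
            device = "cuda" if torch.cuda.is_available() else "cpu"
            net.load_state_dict(torch.load(model_path, map_location=device))
            net.to(device)
            net.eval()
            _net = net
        return _net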
@@ -94,89 +55,12 @@ def process(image):

      return new_im

- def calculate_position(org_size, add_size, position):
-     if position == "Top left":
-         return (0, 0)
-     elif position == "Top center":
-         return ((org_size[0] - add_size[0]) // 2, 0)
-     elif position == "Top right":
-         return (org_size[0] - add_size[0], 0)
-     elif position == "Middle left":
-         return (0, (org_size[1] - add_size[1]) // 2)
-     elif position == "Middle center":
-         return ((org_size[0] - add_size[0]) // 2, (org_size[1] - add_size[1]) // 2)
-     elif position == "Middle right":
-         return (org_size[0] - add_size[0], (org_size[1] - add_size[1]) // 2)
-     elif position == "Bottom left":
-         return (0, org_size[1] - add_size[1])
-     elif position == "Bottom center":
-         return ((org_size[0] - add_size[0]) // 2, org_size[1] - add_size[1])
-     elif position == "Bottom right":
-         return (org_size[0] - add_size[0], org_size[1] - add_size[1])
-
-
- def merge(org_image, add_image, scale, position, display_size):
-     # Size the result image according to the user-selected display size
-     display_width, display_height = map(int, display_size.split('x'))
-
-     # Image merging logic
-     scale_percentage = scale / 100.0
-     new_size = (int(add_image.width * scale_percentage), int(add_image.height * scale_percentage))
-     add_image = add_image.resize(new_size, Image.Resampling.LANCZOS)
-
-     position = calculate_position(org_image.size, add_image.size, position)
-     merged_image = Image.new("RGBA", org_image.size)
-     merged_image.paste(org_image, (0, 0))
-     merged_image.paste(add_image, position, add_image)
-
-     # Resize the merged result to the display size
-     final_image = merged_image.resize((display_width, display_height), Image.Resampling.LANCZOS)
-
-     return final_image
-
-
  with gr.Blocks() as demo:
-     with gr.Tab("Background Removal"):
-         with gr.Column():
-             gr.Markdown("The king of background removal, 'Nuking'")
-             gr.HTML('''
-                 <p style="margin-bottom: 10px; font-size: 94%">
-                     This is a demo for BRIA RMBG 1.4 that uses the
-                     <a href="https://huggingface.co/briaai/RMBG-1.4" target="_blank">BRIA RMBG-1.4 image matting model</a> as its backbone.
-                 </p>
-             ''')
-             input_image = gr.Image(type="pil")
-             output_image = gr.Image()
-             process_button = gr.Button("Remove Background")
-             process_button.click(fn=process, inputs=input_image, outputs=output_image)
-
-     with gr.Tab("Merge"):
-         with gr.Column():
-             org_image = gr.Image(label="Background", type='pil', image_mode='RGBA', height=400)  # height set as an example
-             add_image = gr.Image(label="Foreground", type='pil', image_mode='RGBA', height=400)  # height set as an example
-             scale = gr.Slider(minimum=10, maximum=200, step=1, value=100, label="Scale of Foreground Image (%)")
-             position = gr.Radio(choices=["Middle center", "Top left", "Top center", "Top right", "Middle left", "Middle right", "Bottom left", "Bottom center", "Bottom right"], value="Middle center", label="Position of Foreground Image")
-             display_size = gr.Textbox(value="1024x768", label="Display Size (Width x Height)")
-             btn_merge = gr.Button("Merge Images")
-             result_merge = gr.Image()
-
-             btn_merge.click(
-                 fn=merge,
-                 inputs=[org_image, add_image, scale, position, display_size],
-                 outputs=result_merge,
-             )
-
-
-     with gr.TabItem("Image Search"):
-         with gr.Column():
-             gr.Markdown("### FREE Image Search")
-             search_query = gr.Textbox(label="Photo search")
-             search_btn = gr.Button("Search")
-             images_output = gr.Gallery(label="Search result images")
-             search_btn.click(
-                 fn=show_search_results,
-                 inputs=search_query,
-                 outputs=images_output
-             )
-
- demo.launch()
+     with gr.Column():
+         gr.Markdown("The king of background removal, 'Nuking'")
+         input_image = gr.Image(type="pil")
+         output_image = gr.Image()
+         process_button = gr.Button("Remove Background")
+         process_button.click(fn=process, inputs=input_image, outputs=output_image)
+
+ demo.launch()
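
Review note: the removed merge() pastes the foreground with itself as the mask (merged_image.paste(add_image, position, add_image)), which only respects transparency when the foreground is RGBA. An equivalent sketch using Image.alpha_composite (the transparent-layer approach is an assumption, not from the diff):

    from PIL import Image

    def overlay(background, foreground, position):
        # Both operands of alpha_composite must be RGBA and the same size
        background = background.convert("RGBA")
        layer = Image.new("RGBA", background.size, (0, 0, 0, 0))  # fully transparent canvas
        layer.paste(foreground.convert("RGBA"), position)
        return Image.alpha_composite(background, layer)           # honors partial alpha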
 
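Review note: the new file still calls demo.launch() at module top level, so app.py cannot be imported (e.g. to test process() directly) without starting the server. The usual guard (a suggestion, not part of this commit):

    # As committed, demo.launch() runs at import time. Guarding it lets
    # other scripts do `from app import process` without launching the UI:
    if __name__ == "__main__":
        demo.launch()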