Spaces: Running on Zero

Refactoring (改修)
- app.py +99 -67
- requirements.txt +2 -2
app.py
CHANGED
@@ -1,86 +1,118 @@
 import gradio as gr
-import firebase_admin
-from firebase_admin import credentials, storage
-import tempfile
-import os
 import io
 from PIL import Image
 import base64
-from scripts.process_utils import initialize, process_image_as_base64
+from scripts.process_utils import initialize, process_image_as_base64, image_to_base64
 from scripts.anime import init_model
 from scripts.generate_prompt import load_wd14_tagger_model
-import uuid
-import time

 # Initialization
-initialize(_use_local=False, use_gpu=True, use_dotenv=
+initialize(_use_local=False, use_gpu=True, use_dotenv=True)
 init_model(use_local=False)
 load_wd14_tagger_model()

-
+def process_image(input_image, mode, weight1=None, weight2=None):
+    print(f"Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")
+    # Existing image-processing logic
+    if mode == "original":
+        sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
+    elif mode == "refine":
+        sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
+
+    # For testing, return the Base64 data directly
+    sotai_image = image_to_base64(input_image)
+    sketch_image = image_to_base64(input_image)
+
+    return sotai_image, sketch_image

+def mix_images(sotai_image_data, sketch_image_data, opacity1, opacity2):
+    # Convert the Base64 data to PIL images
     sotai_image = Image.open(io.BytesIO(base64.b64decode(sotai_image_data)))
     sketch_image = Image.open(io.BytesIO(base64.b64decode(sketch_image_data)))
+    # Composite the images
+    mixed_image = Image.new('RGBA', sotai_image.size, (0, 0, 0, 0))
+    opacity_mask1 = Image.new('L', sotai_image.size, int(opacity1 * 255))
+    opacity_mask2 = Image.new('L', sotai_image.size, int(opacity2 * 255))
+    mixed_image.paste(sotai_image, (0, 0), mask=opacity_mask1)
+    mixed_image.paste(sketch_image, (0, 0), mask=opacity_mask2)

-    # Create a temporary file
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
-        image.save(temp_file, format="PNG")
-        temp_file_path = temp_file.name
+    return mixed_image

-def process_image(input_image, mode, weight1, weight2):
-    # Existing image-processing logic
-    sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
-
-    # Save the image pair to Firebase and get the URLs
-    urls = save_image_pair_to_firebase(sotai_image, sketch_image)
-
-    return urls['sotai'], urls['sketch']
+with gr.Blocks() as demo:
+    # title
+    gr.HTML("<h1>Image2Body demo</h1>")
+    # description
+    gr.HTML("<p>Upload an image and select processing options to generate body and sketch images.</p>")
+    # interface
+    submit = None
+    with gr.Row():
+        with gr.Column() as input_col:
+            with gr.Tab("original"):
+                original_input = [
+                    gr.Image(type="pil", label="Input Image"),
+                    gr.Text("original", label="Mode", visible=False),
+                ]
+                original_submit = gr.Button("Submit", variant="primary")
+            with gr.Tab("refine"):
+                refine_input = [
+                    gr.Image(type="pil", label="Input Image"),
+                    gr.Text("refine", label="Mode", visible=False),
+                    gr.Slider(0, 2, value=0.6, step=0.05, label="Weight 1 (Sketch)"),
+                    gr.Slider(0, 1, value=0.05, step=0.025, label="Weight 2 (Body)")
+                ]
+                refine_submit = gr.Button("Submit", variant="primary")
+        with gr.Column() as output_col:
+            sotai_image_data = gr.Text(label="Sotai Image data", visible=False)
+            sketch_image_data = gr.Text(label="Sketch Image data", visible=False)
+            mixed_image = gr.Image(label="Output Image", elem_id="output_image")
+            opacity_slider1 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sotai)")
+            opacity_slider2 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sketch)")
+
+    original_submit.click(
+        process_image,
+        inputs=original_input,
+        outputs=[sotai_image_data, sketch_image_data]
+    )
+    refine_submit.click(
+        process_image,
+        inputs=refine_input,
+        outputs=[sotai_image_data, sketch_image_data]
+    )
+    sotai_image_data.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
+    opacity_slider1.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
+    opacity_slider2.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
+
+demo.launch()

-# Define the Gradio interface
-iface = gr.Interface(
-
-)
-
-# When deploying to Hugging Face Spaces
-iface.queue().launch()
+# # Define the Gradio interface
+# iface = gr.Interface(
+#     fn=process_image,
+#     inputs=[
+#         gr.Image(type="pil", label="Input Image"),
+#         gr.Radio(["original", "refine"], label="Mode", value="original"),
+#         gr.Slider(0, 2, value=0.6, step=0.05, label="Weight 1 (Sketch)"),
+#         gr.Slider(0, 1, value=0.05, step=0.025, label="Weight 2 (Body)")
+#     ],
+#     outputs=[
+#         gr.Text(label="Sotai Image URL"),
+#         gr.Text(label="Sketch Image URL")
+#     ],
+#     title="Image2Body API",
+#     description="Upload an image and select processing options to generate body and sketch images."
+# )
+
+# # When deploying to Hugging Face Spaces
+# iface.queue().launch()
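For reference, the compositing that mix_images performs can be reproduced in isolation with plain Pillow. The sketch below assumes two same-sized input images; the file names body.png / sketch.png / mixed.png and the fixed 0.5 opacity are placeholders, not part of this commit.

from PIL import Image

# Minimal sketch of paste-with-mask compositing (placeholder file names).
body = Image.open("body.png").convert("RGBA")
sketch = Image.open("sketch.png").convert("RGBA")

canvas = Image.new("RGBA", body.size, (0, 0, 0, 0))
# An "L"-mode image acts as a per-pixel alpha mask; a constant value gives uniform opacity.
opacity = Image.new("L", body.size, int(0.5 * 255))
canvas.paste(body, (0, 0), mask=opacity)
canvas.paste(sketch, (0, 0), mask=opacity)
canvas.save("mixed.png")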
requirements.txt
CHANGED
@@ -7,8 +7,8 @@ diffusers==0.27.0 # pth file cannot be loaded in the latest version
 Flask==3.0.3
 Flask-Cors==4.0.0
 Flask-SocketIO==5.3.6
-gradio==
-
+gradio==5.5.0
+huggingface-hub==0.25.2
 kornia==0.7.1
 numpy==1.23.5
 opencv-python==4.9.0.80
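After installing the updated pins (for example with pip install -r requirements.txt), a quick check like the sketch below can confirm that the intended gradio and huggingface-hub versions were resolved; the expected version strings come directly from the pins above.

import gradio
import huggingface_hub

# Sanity check: the installed versions should match the pins in requirements.txt.
assert gradio.__version__ == "5.5.0", gradio.__version__
assert huggingface_hub.__version__ == "0.25.2", huggingface_hub.__version__
print("gradio", gradio.__version__, "| huggingface-hub", huggingface_hub.__version__)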