Update latihsekarang.py

latihsekarang.py: CHANGED (+119 -219)
--- a/latihsekarang.py
@@ -1,24 +1,20 @@
 import os
-from huggingface_hub import whoami
-os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-import sys
-
-
-sys.path.insert(0, os.getcwd())
-
-import gradio as gr
-from PIL import Image
-import torch
 import uuid
-import os
 import shutil
 import json
 import yaml
 from slugify import slugify
 from transformers import AutoProcessor, AutoModelForCausalLM
 
 sys.path.insert(0, "ai-toolkit")
 from toolkit.job import get_job
 
 MAX_IMAGES = 150
 
@@ -27,77 +23,57 @@ def load_captioning(uploaded_files, concept_sentence):
     txt_files = [file for file in uploaded_files if file.endswith('.txt')]
     txt_files_dict = {os.path.splitext(os.path.basename(txt_file))[0]: txt_file for txt_file in txt_files}
     updates = []
     if len(uploaded_images) <= 1:
-        raise gr.Error(
-            "Please upload at least 2 images to train your model (the ideal number with default settings is between 4-30)"
-        )
     elif len(uploaded_images) > MAX_IMAGES:
-        raise gr.Error(
-            f"For now, only {MAX_IMAGES} or less images are allowed for training"
-        )
-    updates.append(gr.update(visible=True))
-    # Update visibility and image for each captioning row and image
     for i in range(1, MAX_IMAGES + 1):
-        # Determine if the current row and image should be visible
         visible = i <= len(uploaded_images)
-
-        # Update visibility of the captioning row
-        updates.append(gr.update(visible=visible))
 
-        # Update for image component - display image if available, otherwise hide
         image_value = uploaded_images[i - 1] if visible else None
-        updates.append(gr.update(value=image_value, visible=visible))
-
         corresponding_caption = False
-        if image_value:
             base_name = os.path.splitext(os.path.basename(image_value))[0]
-            print(base_name)
-            print(image_value)
            if base_name in txt_files_dict:
-                print("entrou")
                with open(txt_files_dict[base_name], 'r') as file:
                    corresponding_caption = file.read()
-
-        # Update value of captioning area
         text_value = corresponding_caption if visible and corresponding_caption else "[trigger]" if visible and concept_sentence else None
-        updates.append(gr.update(value=text_value, visible=visible))
-
-
-    updates.append(gr.update(visible=True))
-
-    updates.append(gr.update(placeholder=f"A mountainous landscape in the style of {concept_sentence}"))
-    updates.append(gr.update(placeholder=f"A {concept_sentence} in a mall"))
-
-    updates.append(gr.update(visible=True))
     return updates
 
 def hide_captioning():
-    return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
 
-def create_dataset(*inputs):
-    images = inputs[0]
-
-    destination_folder = str(f"datasets/{uuid.uuid4()}")
-    if not os.path.exists(destination_folder):
-        os.makedirs(destination_folder)
 
     jsonl_file_path = os.path.join(destination_folder, "metadata.jsonl")
     with open(jsonl_file_path, "a") as jsonl_file:
         for index, image in enumerate(images):
             new_image_path = shutil.copy(image, destination_folder)
-
-            original_caption = inputs[index + 1]
             file_name = os.path.basename(new_image_path)
-
             data = {"file_name": file_name, "prompt": original_caption}
-
             jsonl_file.write(json.dumps(data) + "\n")
 
     return destination_folder
 
-
 def run_captioning(images, concept_sentence, *captions):
-    #Load internally to not consume resources for training
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16
     model = AutoModelForCausalLM.from_pretrained(
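An aside on load_captioning (top of the hunk above): it returns one update per UI output component, in a fixed order: the captioning area, then three updates per captioning row (row visibility, image, caption textbox), then the sample-prompt accordion, the three sample-prompt textboxes, and the start button. A small sanity-check sketch of that count, derived from the append order in the new version of the function shown further down (the names below are illustrative, not part of the file):

MAX_IMAGES = 150  # same constant as in the file

# one update per output component, in the order load_captioning appends them
expected_updates = (
    1                 # captioning area becomes visible
    + 3 * MAX_IMAGES  # per row: row visibility, image, caption textbox
    + 1               # sample-prompt accordion becomes visible
    + 3               # three sample-prompt textboxes get placeholders
    + 1               # start button becomes visible
)
assert expected_updates == 456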
@@ -107,8 +83,7 @@ def run_captioning(images, concept_sentence, *captions):
 
     captions = list(captions)
     for i, image_path in enumerate(images):
-
-        if isinstance(image_path, str): # If image is a file path
             image = Image.open(image_path).convert("RGB")
 
         prompt = "<DETAILED_CAPTION>"
@@ -157,25 +132,22 @@ def start_training(
 ):
     push_to_hub = True
     if not lora_name:
-        raise gr.Error("You forgot to insert your LoRA name! This name has to be unique.")
     try:
         if whoami()["auth"]["accessToken"]["role"] == "write" or "repo.write" in whoami()["auth"]["accessToken"]["fineGrained"]["scoped"][0]["permissions"]:
-            gr.Info(f"Starting training locally {whoami()['name']}. Your LoRA will be available locally and in Hugging Face after it finishes.")
         else:
             push_to_hub = False
-            gr.Warning("Started training locally. Your LoRa will only be available locally because you didn't login with a `write` token to Hugging Face")
     except:
         push_to_hub = False
-        gr.Warning("Started training locally. Your LoRa will only be available locally because you didn't login with a `write` token to Hugging Face")
-
-    print("Started training")
     slugged_lora_name = slugify(lora_name)
 
-    # Load the default config
     with open("config/examples/train_lora_flux_24gb.yaml", "r") as f:
         config = yaml.safe_load(f)
 
-    # Update the config with user inputs
     config["config"]["name"] = slugged_lora_name
     config["config"]["process"][0]["model"]["low_vram"] = low_vram
     config["config"]["process"][0]["train"]["skip_first_sample"] = True
@@ -185,16 +157,16 @@ def start_training(
     config["config"]["process"][0]["network"]["linear_alpha"] = int(rank)
     config["config"]["process"][0]["datasets"][0]["folder_path"] = dataset_folder
     config["config"]["process"][0]["save"]["push_to_hub"] = push_to_hub
-    if push_to_hub:
         try:
             username = whoami()["name"]
         except:
-            raise gr.Error("Error trying to retrieve your username. Are you sure you are logged in with Hugging Face?")
         config["config"]["process"][0]["save"]["hf_repo_id"] = f"{username}/{slugged_lora_name}"
         config["config"]["process"][0]["save"]["hf_private"] = True
     if concept_sentence:
         config["config"]["process"][0]["trigger_word"] = concept_sentence
-
     if sample_1 or sample_2 or sample_3:
         config["config"]["process"][0]["train"]["disable_sampling"] = False
         config["config"]["process"][0]["sample"]["sample_every"] = steps
@@ -208,24 +180,20 @@
         config["config"]["process"][0]["sample"]["prompts"].append(sample_3)
     else:
         config["config"]["process"][0]["train"]["disable_sampling"] = True
-    if model_to_train == "schnell":
         config["config"]["process"][0]["model"]["name_or_path"] = "black-forest-labs/FLUX.1-schnell"
         config["config"]["process"][0]["model"]["assistant_lora_path"] = "ostris/FLUX.1-schnell-training-adapter"
         config["config"]["process"][0]["sample"]["sample_steps"] = 4
-    if use_more_advanced_options:
         more_advanced_options_dict = yaml.safe_load(more_advanced_options)
         config["config"]["process"][0] = recursive_update(config["config"]["process"][0], more_advanced_options_dict)
-
-
-    # Save the updated config
-    # generate a random name for the config
     random_config_name = str(uuid.uuid4())
     os.makedirs("tmp", exist_ok=True)
     config_path = f"tmp/{random_config_name}-{slugged_lora_name}.yaml"
     with open(config_path, "w") as f:
         yaml.dump(config, f)
-
-    # run the job locally
     job = get_job(config_path)
     job.run()
     job.cleanup()
@@ -238,13 +206,13 @@ model:
   is_flux: true
   quantize: true
 network:
-  linear: 16
-  linear_alpha: 16
   type: lora
 sample:
   guidance_scale: 3.5
   height: 1024
-  neg: ''
   sample_every: 1000
   sample_steps: 28
   sampler: flowmatch
@@ -266,149 +234,81 @@ train:
   gradient_accumulation_steps: 1
   gradient_checkpointing: true
   noise_scheduler: flowmatch
-  optimizer: adamw8bit
-  train_text_encoder: false
   train_unet: true
 '''
 
-    … (roughly 70 more removed lines, the original Gradio UI layout of this app, omitted here) …
-                caption_list.append(locals()[f"caption_{i}"])
-
-        with gr.Accordion("Advanced options", open=False):
-            steps = gr.Number(label="Steps", value=1000, minimum=1, maximum=10000, step=1)
-            lr = gr.Number(label="Learning Rate", value=4e-4, minimum=1e-6, maximum=1e-3, step=1e-6)
-            rank = gr.Number(label="LoRA Rank", value=16, minimum=4, maximum=128, step=4)
-            model_to_train = gr.Radio(["dev", "schnell"], value="dev", label="Model to train")
-            low_vram = gr.Checkbox(label="Low VRAM", value=True)
-            with gr.Accordion("Even more advanced options", open=False):
-                use_more_advanced_options = gr.Checkbox(label="Use more advanced options", value=False)
-                more_advanced_options = gr.Code(config_yaml, language="yaml")
-
-        with gr.Accordion("Sample prompts (optional)", visible=False) as sample:
-            gr.Markdown(
-                "Include sample prompts to test out your trained model. Don't forget to include your trigger word/sentence (optional)"
-            )
-            sample_1 = gr.Textbox(label="Test prompt 1")
-            sample_2 = gr.Textbox(label="Test prompt 2")
-            sample_3 = gr.Textbox(label="Test prompt 3")
-
-        output_components.append(sample)
-        output_components.append(sample_1)
-        output_components.append(sample_2)
-        output_components.append(sample_3)
-        start = gr.Button("Start training", visible=False)
-        output_components.append(start)
-        progress_area = gr.Markdown("")
-
-    dataset_folder = gr.State()
-
-    images.upload(
-        load_captioning,
-        inputs=[images, concept_sentence],
-        outputs=output_components
-    )
-
-    images.delete(
-        load_captioning,
-        inputs=[images, concept_sentence],
-        outputs=output_components
-    )
-
-    images.clear(
-        hide_captioning,
-        outputs=[captioning_area, sample, start]
-    )
-
-    start.click(fn=create_dataset, inputs=[images] + caption_list, outputs=dataset_folder).then(
-        fn=start_training,
-        inputs=[
-            lora_name,
-            concept_sentence,
-            steps,
-            lr,
-            rank,
-            model_to_train,
-            low_vram,
-            dataset_folder,
-            sample_1,
-            sample_2,
-            sample_3,
-            use_more_advanced_options,
-            more_advanced_options
-        ],
-        outputs=progress_area,
-    )
-
-    do_captioning.click(fn=run_captioning, inputs=[images, concept_sentence] + caption_list, outputs=caption_list)
 
 if __name__ == "__main__":
-
+++ b/latihsekarang.py
@@ -1,24 +1,20 @@
 import os
 import uuid
 import shutil
 import json
 import yaml
+import torch
+from PIL import Image
 from slugify import slugify
 from transformers import AutoProcessor, AutoModelForCausalLM
+import pinggy as pg
 
+# Ensure the current working directory is in sys.path
+import sys
+sys.path.insert(0, os.getcwd())
 sys.path.insert(0, "ai-toolkit")
 from toolkit.job import get_job
+from huggingface_hub import whoami
 
 MAX_IMAGES = 150
 
@@ -27,77 +23,57 @@ def load_captioning(uploaded_files, concept_sentence):
     txt_files = [file for file in uploaded_files if file.endswith('.txt')]
     txt_files_dict = {os.path.splitext(os.path.basename(txt_file))[0]: txt_file for txt_file in txt_files}
     updates = []
+
     if len(uploaded_images) <= 1:
+        raise pg.Error("Please upload at least 2 images to train your model (the ideal number with default settings is between 4-30)")
     elif len(uploaded_images) > MAX_IMAGES:
+        raise pg.Error(f"For now, only {MAX_IMAGES} or less images are allowed for training")
+
+    updates.append(pg.Update(visible=True))
     for i in range(1, MAX_IMAGES + 1):
         visible = i <= len(uploaded_images)
+        updates.append(pg.Update(visible=visible))
 
         image_value = uploaded_images[i - 1] if visible else None
+        updates.append(pg.Update(value=image_value, visible=visible))
+
         corresponding_caption = False
+        if image_value:
             base_name = os.path.splitext(os.path.basename(image_value))[0]
             if base_name in txt_files_dict:
                 with open(txt_files_dict[base_name], 'r') as file:
                     corresponding_caption = file.read()
+
         text_value = corresponding_caption if visible and corresponding_caption else "[trigger]" if visible and concept_sentence else None
+        updates.append(pg.Update(value=text_value, visible=visible))
+
+    updates.append(pg.Update(visible=True))
+    updates.append(pg.Update(placeholder=f'A portrait of person in a bustling cafe {concept_sentence}', value=f'A person in a bustling cafe {concept_sentence}'))
+    updates.append(pg.Update(placeholder=f"A mountainous landscape in the style of {concept_sentence}"))
+    updates.append(pg.Update(placeholder=f"A {concept_sentence} in a mall"))
+    updates.append(pg.Update(visible=True))
+
     return updates
 
 def hide_captioning():
+    return pg.Update(visible=False), pg.Update(visible=False), pg.Update(visible=False)
 
+def create_dataset(images, *captions):
+    destination_folder = f"datasets/{uuid.uuid4()}"
+    os.makedirs(destination_folder, exist_ok=True)
 
     jsonl_file_path = os.path.join(destination_folder, "metadata.jsonl")
     with open(jsonl_file_path, "a") as jsonl_file:
         for index, image in enumerate(images):
             new_image_path = shutil.copy(image, destination_folder)
+            original_caption = captions[index]
             file_name = os.path.basename(new_image_path)
             data = {"file_name": file_name, "prompt": original_caption}
             jsonl_file.write(json.dumps(data) + "\n")
 
     return destination_folder
 
 def run_captioning(images, concept_sentence, *captions):
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16
     model = AutoModelForCausalLM.from_pretrained(
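An aside on create_dataset above: it copies every uploaded image into a fresh datasets/<uuid> folder and appends one JSON object per image to metadata.jsonl. A hedged illustration of one such record (the file name and caption below are made up):

import json

# hypothetical metadata.jsonl record: "file_name" is the copied image,
# "prompt" is the caption paired with it
record = {"file_name": "photo_001.jpg", "prompt": "a photo of p3rs0n sitting in a bustling cafe"}
print(json.dumps(record))
# {"file_name": "photo_001.jpg", "prompt": "a photo of p3rs0n sitting in a bustling cafe"}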
@@ -107,8 +83,7 @@ def run_captioning(images, concept_sentence, *captions):
 
     captions = list(captions)
     for i, image_path in enumerate(images):
+        if isinstance(image_path, str):
             image = Image.open(image_path).convert("RGB")
 
         prompt = "<DETAILED_CAPTION>"
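The rest of run_captioning falls outside this diff. For context, here is a minimal sketch of how a <DETAILED_CAPTION> is usually generated with Florence-2 through transformers; this is the standard usage pattern, assumed here rather than quoted from the file, written as a standalone helper whose parameters mirror the variables set up above (processor, model, image, prompt, device, torch_dtype):

def florence2_caption(processor, model, image, prompt, device, torch_dtype):
    # standard Florence-2 captioning call (sketch, not part of latihsekarang.py)
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed = processor.post_process_generation(
        generated_text, task=prompt, image_size=(image.width, image.height)
    )
    return parsed.get(prompt, "")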
@@ -157,25 +132,22 @@ def start_training(
 ):
     push_to_hub = True
     if not lora_name:
+        raise pg.Error("You forgot to insert your LoRA name! This name has to be unique.")
     try:
         if whoami()["auth"]["accessToken"]["role"] == "write" or "repo.write" in whoami()["auth"]["accessToken"]["fineGrained"]["scoped"][0]["permissions"]:
+            pg.Info(f"Starting training locally {whoami()['name']}. Your LoRA will be available locally and in Hugging Face after it finishes.")
         else:
             push_to_hub = False
+            pg.Warning("Started training locally. Your LoRa will only be available locally because you didn't login with a `write` token to Hugging Face")
     except:
         push_to_hub = False
+        pg.Warning("Started training locally. Your LoRa will only be available locally because you didn't login with a `write` token to Hugging Face")
+
     slugged_lora_name = slugify(lora_name)
 
     with open("config/examples/train_lora_flux_24gb.yaml", "r") as f:
         config = yaml.safe_load(f)
 
     config["config"]["name"] = slugged_lora_name
     config["config"]["process"][0]["model"]["low_vram"] = low_vram
     config["config"]["process"][0]["train"]["skip_first_sample"] = True
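The write-access check above inspects only the first scoped entry of a fine-grained token and swallows every exception. A hedged, slightly more defensive variant that uses the same keys the diff already relies on (this helper is a suggestion, not part of the file):

from huggingface_hub import whoami

def has_write_access() -> bool:
    # treat any missing key or API failure as "no write access" instead of crashing
    try:
        token = whoami()["auth"]["accessToken"]
        if token.get("role") == "write":
            return True
        scoped = token.get("fineGrained", {}).get("scoped", [])
        return any("repo.write" in entry.get("permissions", []) for entry in scoped)
    except Exception:
        return False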
@@ -185,16 +157,16 @@ def start_training(
     config["config"]["process"][0]["network"]["linear_alpha"] = int(rank)
     config["config"]["process"][0]["datasets"][0]["folder_path"] = dataset_folder
     config["config"]["process"][0]["save"]["push_to_hub"] = push_to_hub
+    if push_to_hub:
         try:
             username = whoami()["name"]
         except:
+            raise pg.Error("Error trying to retrieve your username. Are you sure you are logged in with Hugging Face?")
         config["config"]["process"][0]["save"]["hf_repo_id"] = f"{username}/{slugged_lora_name}"
         config["config"]["process"][0]["save"]["hf_private"] = True
     if concept_sentence:
         config["config"]["process"][0]["trigger_word"] = concept_sentence
+
     if sample_1 or sample_2 or sample_3:
         config["config"]["process"][0]["train"]["disable_sampling"] = False
         config["config"]["process"][0]["sample"]["sample_every"] = steps
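start_training edits a nested ai-toolkit config loaded from config/examples/train_lora_flux_24gb.yaml. Reconstructed only from the keys touched in this diff and in the embedded config_yaml string, the shape it assumes looks roughly like this (the values shown are placeholders):

# hedged sketch of the config structure; keys come from the assignments in this diff,
# values are placeholders
config = {
    "config": {
        "name": "my-lora",  # slugged_lora_name
        "process": [
            {
                "model": {"low_vram": True, "name_or_path": "...", "assistant_lora_path": "..."},
                "network": {"linear": 16, "linear_alpha": 16, "type": "lora"},
                "train": {"skip_first_sample": True, "disable_sampling": True},
                "datasets": [{"folder_path": "datasets/<uuid>"}],
                "save": {"push_to_hub": False, "hf_repo_id": "user/my-lora", "hf_private": True},
                "sample": {"sample_every": 1000, "sample_steps": 28, "prompts": []},
                "trigger_word": "p3rs0n",
            }
        ],
    }
}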
@@ -208,24 +180,20 @@
         config["config"]["process"][0]["sample"]["prompts"].append(sample_3)
     else:
         config["config"]["process"][0]["train"]["disable_sampling"] = True
+    if model_to_train == "schnell":
         config["config"]["process"][0]["model"]["name_or_path"] = "black-forest-labs/FLUX.1-schnell"
         config["config"]["process"][0]["model"]["assistant_lora_path"] = "ostris/FLUX.1-schnell-training-adapter"
         config["config"]["process"][0]["sample"]["sample_steps"] = 4
+    if use_more_advanced_options:
         more_advanced_options_dict = yaml.safe_load(more_advanced_options)
         config["config"]["process"][0] = recursive_update(config["config"]["process"][0], more_advanced_options_dict)
+
     random_config_name = str(uuid.uuid4())
     os.makedirs("tmp", exist_ok=True)
     config_path = f"tmp/{random_config_name}-{slugged_lora_name}.yaml"
     with open(config_path, "w") as f:
         yaml.dump(config, f)
+
     job = get_job(config_path)
     job.run()
     job.cleanup()
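recursive_update is called here but defined outside the hunks shown. Assuming it performs the usual deep merge of the advanced-options YAML into the process config, a minimal sketch would be:

def recursive_update(target, src):
    # deep-merge src into target: recurse into nested dicts, overwrite everything else
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(target.get(key), dict):
            target[key] = recursive_update(target[key], value)
        else:
            target[key] = value
    return target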
@@ -238,13 +206,13 @@ model:
   is_flux: true
   quantize: true
 network:
+  linear: 16
+  linear_alpha: 16
   type: lora
 sample:
   guidance_scale: 3.5
   height: 1024
+  neg: ''
   sample_every: 1000
   sample_steps: 28
   sampler: flowmatch
@@ -266,149 +234,81 @@ train:
   gradient_accumulation_steps: 1
   gradient_checkpointing: true
   noise_scheduler: flowmatch
+  optimizer: adamw8bit
+  train_text_encoder: false
   train_unet: true
 '''
 
+def main():
+    with pg.App() as app:
+        app.add_page(title="LoRA Ease for FLUX", description="Train a high quality FLUX LoRA in a breeze")
+
+        app.add_textbox(
+            id="lora_name",
+            label="The name of your LoRA",
+            placeholder="e.g.: Persian Miniature Painting style, Cat Toy",
+        )
+        app.add_textbox(
+            id="concept_sentence",
+            label="Trigger word/sentence",
+            placeholder="uncommon word like p3rs0n or trtcrd, or sentence like 'in the style of CNSTLL'",
+        )
+
+        image_upload = app.add_file_upload(
+            id="images",
+            label="Upload your images",
+            file_types=["image", ".txt"],
+            multiple=True,
+        )
+
+        captioning_area = app.add_container(id="captioning_area", visible=False)
+        captioning_area.add_text("Custom captioning")
+        do_captioning = app.add_button("Add AI captions with Florence-2", id="do_captioning")
+
+        for i in range(1, MAX_IMAGES + 1):
+            with captioning_area.add_row(id=f"captioning_row_{i}", visible=False) as row:
+                row.add_image(id=f"image_{i}", width=111, height=111, visible=False)
+                row.add_textbox(id=f"caption_{i}", label=f"Caption {i}")
+
+        app.add_accordion(title="Advanced options", open=False)
+        app.add_number(id="steps", label="Steps", value=1000, min=1, max=10000)
+        app.add_number(id="lr", label="Learning Rate", value=4e-4, min=1e-6, max=1e-3)
+        app.add_number(id="rank", label="LoRA Rank", value=16, min=4, max=128)
+        app.add_radio(id="model_to_train", options=["dev", "schnell"], value="dev", label="Model to train")
+        app.add_checkbox(id="low_vram", label="Low VRAM", value=True)
+
+        with app.add_accordion(title="Even more advanced options", open=False):
+            app.add_checkbox(id="use_more_advanced_options", label="Use more advanced options", value=False)
+            app.add_code(id="more_advanced_options", value=config_yaml, language="yaml")
+
+        app.add_accordion(title="Sample prompts (optional)", visible=False)
+        app.add_textbox(id="sample_1", label="Test prompt 1")
+        app.add_textbox(id="sample_2", label="Test prompt 2")
+        app.add_textbox(id="sample_3", label="Test prompt 3")
+
+        start = app.add_button("Start training", id="start", visible=False)
+        progress_area = app.add_text("")
+
+        app.on_upload(id="images", fn=load_captioning, inputs=["images", "concept_sentence"], outputs=["captioning_area", "sample", "start"])
+        app.on_click(id="do_captioning", fn=run_captioning, inputs=["images", "concept_sentence"] + [f"caption_{i}" for i in range(1, MAX_IMAGES + 1)], outputs=[f"caption_{i}" for i in range(1, MAX_IMAGES + 1)])
+        app.on_click(id="start", fn=create_dataset, inputs=["images"] + [f"caption_{i}" for i in range(1, MAX_IMAGES + 1)], outputs=["dataset_folder"])
+        app.on_click(id="start", fn=start_training, inputs=[
+            "lora_name",
+            "concept_sentence",
+            "steps",
+            "lr",
+            "rank",
+            "model_to_train",
+            "low_vram",
+            "dataset_folder",
+            "sample_1",
+            "sample_2",
+            "sample_3",
+            "use_more_advanced_options",
+            "more_advanced_options"
+        ], outputs=["progress_area"])
+
+        app.run()
 
 if __name__ == "__main__":
+    main()