init
Browse files- .DS_Store +0 -0
- README.md +13 -0
- app.py +119 -0
- config.json +25 -0
- requirements.txt +1 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Colabor-Image-Finetune
|
3 |
+
emoji: 💻
|
4 |
+
colorFrom: purple
|
5 |
+
colorTo: indigo
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.29.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
short_description: Fine-tuned image model that creates artworks of personas
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import replicate
|
2 |
+
import os
|
3 |
+
import json
|
4 |
+
import gradio as gr
|
5 |
+
import requests
|
6 |
+
|
7 |
+
# Fish Audio API key, read from the environment. NOTE(review): this name is
# never used anywhere in this module — presumably left over from (or reserved
# for) a TTS integration; confirm and remove if not needed.
fish_api_key = os.getenv("FISH_API_KEY")
|
8 |
+
|
9 |
+
def load_config(config_path="config.json"):
    """Load and return the application configuration from a JSON file.

    Args:
        config_path: Path to the JSON configuration file.

    Returns:
        dict: The parsed configuration.

    Raises:
        FileNotFoundError: If ``config_path`` does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Explicit encoding avoids platform-dependent default encodings.
    with open(config_path, "r", encoding="utf-8") as f:
        return json.load(f)
|
13 |
+
|
14 |
+
# Module-level configuration, loaded once at import time.
config = load_config()

# The replicate client reads REPLICATE_API_TOKEN from the environment itself;
# this binding exists so a missing token is easy to spot when debugging.
# (Removed commented-out dead code that previously loaded the token from
# config.json — secrets belong in the environment, not in the config file.)
api_token = os.getenv("REPLICATE_API_TOKEN")
|
19 |
+
|
20 |
+
def save_image(output):
    """Download the first image URL in *output* and save it locally.

    Args:
        output: Sequence of image URLs, as returned by ``replicate.run``
            (only the first element is used).

    Returns:
        str: Local path of the saved PNG file.

    Raises:
        IndexError: If *output* is empty.
        requests.HTTPError: If the download returns an error status.
        requests.Timeout: If the download does not complete in time.
    """
    file_path = "generated_image.png"
    image_url = output[0]
    # Timeout prevents the UI from hanging forever on a stalled download.
    response = requests.get(image_url, timeout=60)
    # Fail loudly on a bad response instead of silently writing an
    # HTML error page to disk as "generated_image.png".
    response.raise_for_status()

    # Save the image
    with open(file_path, "wb") as file:
        file.write(response.content)

    return file_path
|
29 |
+
|
30 |
+
def generate(lora_model, prompt, aspect_ratio, num_inference_steps, guidance_scale, seed, lora_scale):
    """Run the selected LoRA model on Replicate and return the saved image path.

    Args:
        lora_model: Key into ``config["image_model"]`` (e.g. ``"group1"``).
        prompt: Free-text prompt from the user.
        aspect_ratio: Output aspect ratio string (e.g. ``"1:1"``).
        num_inference_steps: Number of denoising steps.
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed; -1 requests a random seed (per the UI label).
        lora_scale: Strength of the LoRA weights.

    Returns:
        str: Path to the downloaded image file.
    """
    selected_lora_model = config["image_model"][lora_model]["persona"]
    trigger_word = config["image_model"][lora_model]["trigger_word"]

    # Bug fix: the previous f-string embedded literal `+ " " +` fragments
    # into the prompt text; compose the prompt directly instead.
    full_prompt = f"{trigger_word} {prompt} in the style of {trigger_word}"

    # Named `payload` to avoid shadowing the builtin `input`.
    payload = {
        "model": "dev",
        "prompt": full_prompt,
        "go_fast": False,
        "lora_scale": lora_scale,
        "num_outputs": 1,
        "aspect_ratio": aspect_ratio,
        "output_format": "png",
        "guidance_scale": guidance_scale,
        "extra_lora_scale": lora_scale,
        "num_inference_steps": num_inference_steps,
        # `int(seed) if int(seed) != -1 else -1` was a no-op conditional;
        # pass the integer seed straight through (-1 = random, per UI label).
        "seed": int(seed),
    }

    output = replicate.run(
        selected_lora_model,
        input=payload,
    )

    image_path = save_image(output)
    return image_path
|
56 |
+
|
57 |
+
# Create Gradio interface
|
58 |
+
def create_interface():
    """Build and return the Gradio Blocks UI for the LoRA image generator.

    Returns:
        The assembled ``gr.Blocks`` interface (not yet launched).
    """

    with gr.Blocks(title="Image Generator") as interface:
        gr.Markdown("# LoRA Image Generator")

        with gr.Row():
            # Left column: all generation controls.
            with gr.Column():
                # Choices must match the keys under "image_model" in
                # config.json — TODO(review): load them from config instead
                # of hard-coding, so the two cannot drift apart.
                lora_model = gr.Dropdown(
                    choices=["group1", "group2", "group3", "group4", "group5"],
                    label="Choose your Image Model"
                )
                prompt = gr.Textbox(
                    label="Prompt",
                )
                aspect_ratio = gr.Dropdown(
                    choices=["1:1", "16:9", "9:16"],
                    label="Aspect Ratio",
                    value="1:1"
                )
                num_inference_steps = gr.Slider(
                    minimum=1,
                    maximum=50,
                    value=28,
                    step=1,
                    label="Inference Steps"
                )
                guidance_scale = gr.Slider(
                    minimum=0,
                    maximum=10,
                    value=3,
                    step=1,
                    label="Guidance Scale"
                )
                # -1 means "pick a random seed"; any other value pins it.
                seed = gr.Number(
                    label="Seed (-1 for random)",
                    value=-1.0
                )
                lora_scale = gr.Slider(
                    minimum=-1.0,
                    maximum=3.0,
                    value=1.0,
                    step=0.05,
                    label="LoRA Scale"
                )
                generate_btn = gr.Button("Generate Image")

            # Right column: the generated result.
            with gr.Column():
                output_image = gr.Image(label="Generated Image")

        # Input order must match the parameter order of `generate`.
        generate_btn.click(
            fn=generate,
            inputs=[lora_model, prompt, aspect_ratio, num_inference_steps,
                    guidance_scale, seed, lora_scale],
            outputs=output_image
        )

    return interface
|
116 |
+
|
117 |
+
if __name__ == "__main__":
    # Build the UI and start the Gradio server.
    create_interface().launch()
|
config.json
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"image_model": {
|
3 |
+
"group1" : {
|
4 |
+
"persona":"kratadata/persona1:c44c75daebd6dc27d65a43322ad8440479bf2dfc77e7fb4d670f02c2bf2a5a00",
|
5 |
+
"trigger_word": "TOK"
|
6 |
+
},
|
7 |
+
"group2" : {
|
8 |
+
"persona":"kratadata/persona2:8b40b98ccbf3d6080e955cf7a9e5a83e6306f85169c1d127db2b0104f29b571e",
|
9 |
+
"trigger_word":"picaka"
|
10 |
+
},
|
11 |
+
"group3" : {
|
12 |
+
"persona": "kratadata/persona3:c593b245347d9b030eda2cdb8955d9122c3281620a268ceb4bb6e18845a3bfb3",
|
13 |
+
"trigger_word" : "kAi"
|
14 |
+
},
|
15 |
+
"group4" : {
|
16 |
+
"persona": "kratadata/persona4:ac804d11ee98507be0e5dfa8c3a9dc112a0b88fb97a2eb4d3b79142477180e76",
|
17 |
+
"trigger_word" : "TUMULU"
|
18 |
+
},
|
19 |
+
"group5" : {
|
20 |
+
"persona": "kratadata/persona5:34b9586bb90f50f5b598b50d7781142e4cc5dcc1b32d2f4eae4b4379404f5f3e",
|
21 |
+
"trigger_word" : "JSCA"
|
22 |
+
}
|
23 |
+
|
24 |
+
}
|
25 |
+
}
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
replicate
gradio
requests
|