sagar007 committed
Commit f0e2bd1 · verified · 1 Parent(s): fb79b39

Update app.py

Files changed (1): app.py (+88, -40)
app.py CHANGED
@@ -1,47 +1,95 @@
 
  import gradio as gr
  import torch
- from huggingface_hub import hf_hub_download, login
- from llama_cpp import Llama
- from peft import PeftModel, PeftConfig
- import os
-
- # Authenticate with Hugging Face
- if 'HF_TOKEN' in os.environ:
-     login(token=os.environ['HF_TOKEN'])
- else:
-     raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")
-
- # Download GGUF model
- model_path = hf_hub_download(repo_id="city96/FLUX.1-dev-gguf", filename="flux1-dev-Q4_K_S.gguf")
-
- # Initialize the GGUF model
- llm = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=-1)
-
- # Load your LoRA weights
- lora_path = "sagar007/sagar_flux"
- config = PeftConfig.from_pretrained(lora_path)
- model = PeftModel.from_pretrained(llm, lora_path)
-
- def generate_image(prompt, guidance_scale, num_inference_steps):
-     # This is a placeholder for the actual image generation logic
-     # You'll need to implement the correct way to use your model for image generation
-     output = model.generate(prompt, max_new_tokens=100)
-     # For now, we'll return a placeholder image
-     return gr.Image.update(value=None, visible=True)
-
- # Gradio interface
  with gr.Blocks() as app:
-     gr.Markdown("# GGUF FLUX Model with Sagar LoRA Image Generator")
      with gr.Row():
-         with gr.Column():
-             prompt = gr.Textbox(label="Prompt", placeholder="Enter your image description here")
-             guidance_scale = gr.Slider(minimum=1, maximum=20, value=7.5, label="Guidance Scale")
-             num_inference_steps = gr.Slider(minimum=1, maximum=100, value=50, label="Number of Inference Steps")
-             generate_btn = gr.Button("Generate Image")
-         with gr.Column():
-             output = gr.Image(label="Generated Image")

-     generate_btn.click(generate_image, inputs=[prompt, guidance_scale, num_inference_steps], outputs=output)

- # Launch the app
  app.launch()
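
Note on the removed version: llama_cpp's Llama is a runtime for GGUF language models, and PeftModel.from_pretrained expects a PyTorch model rather than a Llama handle, so the downloaded FLUX checkpoint was never actually wired into an image pipeline. Purely for reference, one way to load that same GGUF checkpoint could look roughly like the sketch below, assuming a diffusers release with GGUF support (0.32 or later) and the gguf package installed; this is not what the commit does.

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig

# Load the quantized FLUX transformer straight from the GGUF file
# (repo and filename taken from the removed hf_hub_download call above).
transformer = FluxTransformer2DModel.from_single_file(
    "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q4_K_S.gguf",
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

# Build the full pipeline around the quantized transformer and attach the LoRA.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights("sagar007/sagar_flux")
pipe.enable_model_cpu_offload()  # keep peak GPU memory manageable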
 
+ import spaces
  import gradio as gr
  import torch
+ from PIL import Image
+ from diffusers import DiffusionPipeline
+ import random
+
+ # Initialize the base model and specific LoRA
+ base_model = "city96/FLUX.1-dev-gguf/flux1-dev-Q4_K_S.gguf"
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
+ pipe.to("cpu")
+
+ lora_repo = "sagar007/sagar_flux"
+ trigger_word = ""  # Leave trigger_word blank if not used.
+ pipe.load_lora_weights(lora_repo)
+
+ MAX_SEED = 2**32-1
+
+
+ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+     # Set random seed for reproducibility
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+
+     # Update progress bar (0% at start)
+     progress(0, "Starting image generation...")
+
+     # Generate image with progress updates
+     for i in range(1, steps + 1):
+         # Simulate the processing step (in a real scenario, you would integrate this with your image generation process)
+         if i % (steps // 10) == 0:  # Update every 10% of the steps
+             progress(i / steps * 100, f"Processing step {i} of {steps}...")
+
+     # Generate image using the pipeline
+     image = pipe(
+         prompt=f"{prompt} {trigger_word}",
+         num_inference_steps=steps,
+         guidance_scale=cfg_scale,
+         width=width,
+         height=height,
+         generator=generator,
+         joint_attention_kwargs={"scale": lora_scale},
+     ).images[0]
+
+     # Final update (100%)
+     progress(100, "Completed!")
+
+     yield image, seed
+
+ # Example cached image and settings
+ example_image_path = "example0.webp"  # Replace with the actual path to the example image
+ example_prompt = """A Jelita Sukawati speaker is captured mid-speech. She has long, dark brown hair that cascades over her shoulders, framing her radiant, smiling face. Her Latina features are highlighted by warm, sun-kissed skin and bright, expressive eyes. She gestures with her left hand, displaying a delicate ring on her pinky finger, as she speaks passionately.
+ The woman is wearing a colorful, patterned dress with a green lanyard featuring multiple badges and logos hanging around her neck. The lanyard prominently displays the "CagliostroLab" text.
+ Behind her, there is a blurred background with a white banner containing logos and text, indicating a professional or conference setting. The overall scene captures the energy and vibrancy of her presentation."""
+ example_cfg_scale = 3.2
+ example_steps = 32
+ example_width = 1152
+ example_height = 896
+ example_seed = 3981632454
+ example_lora_scale = 0.85
+
+ def load_example():
+     # Load example image from file
+     example_image = Image.open(example_image_path)
+     return example_prompt, example_cfg_scale, example_steps, False, example_seed, example_width, example_height, example_lora_scale, example_image
+
  with gr.Blocks() as app:
+     gr.Markdown("# Flux RealismLora Image Generator")
      with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt", lines=5)
+             generate_button = gr.Button("Generate")
+             cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
+             steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
+             width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
+             height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
+             randomize_seed = gr.Checkbox(False, label="Randomize seed")
+             seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
+             lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
+         with gr.Column(scale=1):
+             result = gr.Image(label="Generated Image")
+             gr.Markdown("Generate images using RealismLora and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")
+
+     # Automatically load example data and image when the interface is launched
+     app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])

+     generate_button.click(
+         run_lora,
+         inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
+         outputs=[result, seed]
+     )

+ app.queue()
  app.launch()
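
A caveat on the added code: the pipeline is moved to CPU while the generator is created on CUDA, which raises an error on machines without a GPU, and gr.Progress updates are expressed as fractions between 0 and 1 (or step tuples) rather than percentages. As an illustration only, a device-consistent seeding helper could look like the sketch below; make_generator is a hypothetical name that does not appear in the commit.

import torch

def make_generator(seed: int) -> torch.Generator:
    # Hypothetical helper (not part of the commit): create the RNG on whichever
    # device is actually available so seeding works with or without CUDA.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.Generator(device=device).manual_seed(seed)

# Inside run_lora this would replace the hard-coded CUDA generator, e.g.:
# generator = make_generator(seed)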