jonwiese committed on
Commit
831d468
1 Parent(s): 7680b48
Files changed (3) hide show
  1. README.md +5 -6
  2. app.py +97 -0
  3. requirements.txt +12 -0
README.md CHANGED
@@ -1,12 +1,11 @@
1
  ---
2
- title: Aiste
3
- emoji: 👁
4
- colorFrom: red
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 5.9.1
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Fhnw Image Lora
3
+ emoji: 🐠
4
+ colorFrom: indigo
5
+ colorTo: green
6
  sdk: gradio
7
+ sdk_version: 5.6.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
 
app.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import DiffusionPipeline

# bfloat16 halves memory versus float32; chosen here for the FLUX pipeline.
dtype = torch.bfloat16
# Prefer CUDA when available; fall back to CPU so the app still starts.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the FLUX.1-schnell text-to-image pipeline once at import time and
# move it to the selected device. LoRA weights are loaded later, per call.
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)

# Largest value a 32-bit signed int can hold — upper bound for seeds.
MAX_SEED = np.iinfo(np.int32).max
# Maximum width/height (pixels) exposed by the UI sliders.
MAX_IMAGE_SIZE = 1024
16
@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    """Generate a glazed-tile-pattern image from *prompt* with FLUX.1-schnell + LoRA.

    Parameters
    ----------
    prompt : str
        User prompt; wrapped with a fixed prefix that steers the model
        toward abstract glazed tile patterns.
    seed : int
        RNG seed for reproducibility; replaced by a fresh random value
        when ``randomize_seed`` is True.
    randomize_seed : bool
        Draw a new random seed in ``[0, MAX_SEED]`` instead of using *seed*.
    width, height : int
        Output image size in pixels.
    num_inference_steps : int
        Denoising steps passed to the pipeline.
    progress : gr.Progress
        Gradio progress tracker (mirrors the diffusers tqdm bar).

    Returns
    -------
    tuple
        ``(image, seed)`` — the generated PIL image and the seed actually used.
    """
    lora = "kratadata/aiste-tiles"
    prefix = "Abstract glazed, tile pattern of"
    suffix = ""

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # Fix: the original built the prompt as prefix + " " + prompt + " " + suffix
    # with a prefix that already ended in a space and an empty suffix, yielding
    # a double space and a trailing space. Join only the non-empty parts.
    full_prompt = " ".join(part for part in (prefix, prompt.strip(), suffix) if part)

    # NOTE(review): loading LoRA weights on every request is wasteful and may
    # stack adapter state across calls — consider loading once at startup.
    # Kept here to preserve the original behavior.
    pipe.load_lora_weights(lora)

    image = pipe(
        prompt=full_prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0,  # schnell is guidance-distilled; CFG stays off
    ).images[0]
    return image, seed
36
+
37
+
38
# Gradio UI: a prompt box plus advanced settings on the left, result on the
# right. Running the button or submitting the prompt calls infer().
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Image Prompt",
                # Fix: the original passed the *string* "True" (truthy but
                # the wrong type) — use the boolean.
                show_label=True,
                info="Your image prompt",
                max_lines=4,
                placeholder="Enter your prompt",
                container=True,
            )
            with gr.Accordion("Advanced Settings", open=False):
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    info="Keep at 1024 for best results",
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    info="Keep at 1024 for best results",
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=8,
                    step=1,
                    value=2,
                    info="Increase to 4 for better results",
                )
                randomize_seed = gr.Checkbox(
                    label="Randomize seed",
                    value=True,
                    info="Keep true to generate a new image each time",
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                    info="Fix seed if you want to keep generating the same image",
                )
            run_button = gr.Button("Run", scale=0)

        with gr.Column():
            result = gr.Image(label="Result", show_label=False, format="jpeg")

    # Wire both the button click and Enter-in-textbox to the same handler.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ spaces
3
+ diffusers
4
+ transformers
5
+ accelerate
6
+ sentencepiece
7
+ protobuf
8
+ peft
9
+ numpy==1.26.1
10
+ #optimum-quanto
11
+
12
+ --extra-index-url https://download.pytorch.org/whl/cu121