same899 committed
Commit 5b7ef6c · verified · 1 Parent(s): 29cd825

Upload style.py with huggingface_hub

Files changed (1)
  1. style.py +128 -0
style.py ADDED
@@ -0,0 +1,128 @@
import argparse
import glob
import json
import os

import torch
from accelerate import PartialState
from src_inference.lora_helper import set_single_lora
from src_inference.pipeline import FluxPipeline
from PIL import Image


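# The attention processors installed by OmniConsistency appear to cache
# key/value states for the conditioning image in `bank_kv`; clearing the bank
# between generations keeps one image's state from leaking into the next
# (an inference from how the cache is used below, not documented behaviour).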
def clear_cache(transformer):
    for _, attn_processor in transformer.attn_processors.items():
        attn_processor.bank_kv.clear()


class style_processor:
    def __init__(self, flux_path, lora_path, omni_path, device):
        # Initialize the base FLUX pipeline
        self.device = device
        self.base_path = flux_path  # path to the base FLUX model
        self.pipe = FluxPipeline.from_pretrained(
            self.base_path, torch_dtype=torch.bfloat16
        ).to(self.device)
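        # Derive a human-readable style prefix from the LoRA filename, e.g. a
        # hypothetical "Paper_Cutting_rank128_bf16.safetensors" would become
        # "Paper Cutting style, " and get prepended to every caption.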
        self.style_prompt = f"{os.path.basename(lora_path).replace('_rank128_bf16.safetensors', '').replace('_', ' ').title()} style, "

        # Load the OmniConsistency module into the FLUX transformer
        set_single_lora(
            self.pipe.transformer,
            omni_path,
            lora_weights=[1],
            cond_size=512,
        )

        # Load the external style LoRA (lora_path points directly at the
        # .safetensors file, so no separate weight_name is needed)
        self.pipe.unload_lora_weights()
        self.pipe.load_lora_weights(lora_path)

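    # Generate one stylized image: the input image becomes the spatial
    # conditioning input, and the output is rendered at the same resolution.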
    def process(self, image_path, prompt):
        if isinstance(image_path, str):
            spatial_image = [Image.open(image_path).convert("RGB")]
        elif isinstance(image_path, Image.Image):
            spatial_image = [image_path]
        else:
            raise ValueError(f"Invalid image type: {type(image_path)}")

        subject_images = []

        width, height = spatial_image[0].size

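        # The content image is passed as the spatial condition (cond_size=512);
        # the generator seed is fixed, so every image starts from the same
        # initial noise.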
        image = self.pipe(
            prompt,
            height=height,
            width=width,
            guidance_scale=3.5,
            num_inference_steps=25,
            max_sequence_length=512,
            generator=torch.Generator("cpu").manual_seed(5),
            spatial_images=spatial_image,
            subject_images=subject_images,
            cond_size=512,
        ).images[0]

        # Clear cache after generation
        clear_cache(self.pipe.transformer)

        return image


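# Collect .jpg/.png files from a directory (non-recursive), or accept a single
# image file path.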
def get_images_from_path(path):
    if os.path.isdir(path):
        return glob.glob(os.path.join(path, "*.jpg")) + glob.glob(
            os.path.join(path, "*.png")
        )
    elif os.path.isfile(path) and (path.endswith(".jpg") or path.endswith(".png")):
        return [path]
    else:
        return []


def parse_args():
    parser = argparse.ArgumentParser(description="Style processor")
    parser.add_argument("--flux_path", type=str, required=True)
    parser.add_argument("--lora_paths", type=str, required=True, nargs="+")
    parser.add_argument("--omni_path", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--prompt_dir", type=str, required=True)
    parser.add_argument("--images_path", type=str, required=True)
    return parser.parse_args()


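# Main entry point. The script is presumably launched with `accelerate launch`
# (one process per GPU): each process picks a single LoRA from --lora_paths and
# renders every image found under --images_path in that style.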
if __name__ == "__main__":
    args = parse_args()
    flux_path = args.flux_path
    lora_paths = args.lora_paths
    omni_path = args.omni_path
    output_dir = args.output_dir
    prompt_dir = args.prompt_dir
    images_path = args.images_path

    distributed_state = PartialState()

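    # The rank is recovered from the CUDA device index ("cuda:N"), which
    # assumes one process per GPU and at least as many entries in --lora_paths
    # as there are processes.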
    device = distributed_state.device
    rank = int(str(device).split(":")[1])
    lora = lora_paths[rank]

    output_lora_path = os.path.join(output_dir, os.path.basename(lora))
    os.makedirs(output_lora_path, exist_ok=True)

    processor = style_processor(flux_path, lora, omni_path, device)

    images_path = get_images_from_path(images_path)
    for image_path in images_path:
        image_output_path = os.path.join(output_lora_path, os.path.basename(image_path))
        if os.path.exists(image_output_path):
            print(f"File {image_output_path} already exists, skipping.")
            continue

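        # Each image is expected to have a sidecar "<image filename>.json" in
        # --prompt_dir with a "caption" field; the style prefix is prepended to
        # that caption before generation.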
        try:
            with open(
                os.path.join(prompt_dir, os.path.basename(image_path) + ".json")
            ) as f:
                prompt = json.load(f)["caption"]
            output = processor.process(image_path, processor.style_prompt + prompt)
            output.save(image_output_path)
        except Exception as e:
            print(f"Error processing {image_path}: {e}")