AlekseyCalvin committed
Commit df73a54
1 Parent(s): b2f7197

Update app.py

Files changed (1): app.py (+11, -16)
app.py CHANGED
@@ -13,10 +13,13 @@ import random
 import time
 from huggingface_hub import hf_hub_download
 from diffusers import FluxTransformer2DModel, FluxPipeline
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
 import safetensors.torch
 from safetensors.torch import load_file
-from transformers import CLIPModel, CLIPProcessor, CLIPConfig
+from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
 import gc
+from gradio_client import Client
+Client = Client("AlekseyCalvin/soonfactory4", hf_token=os.getenv("HF_TOKEN"))
 
 cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
 os.environ["TRANSFORMERS_CACHE"] = cache_path
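
Note on the added gradio_client lines: rebinding the name Client to the returned instance shadows the imported gradio_client.Client class, so the class cannot be instantiated again later in the module. A minimal non-shadowing sketch of the same call (soonfactory_client is a hypothetical name, not from this commit):

import os
from gradio_client import Client

# Connect to the Space; HF_TOKEN must be set in the environment.
soonfactory_client = Client("AlekseyCalvin/soonfactory4", hf_token=os.getenv("HF_TOKEN"))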
@@ -28,23 +31,15 @@ torch.backends.cuda.matmul.allow_tf32 = True
 
 pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
 pipe.to(device="cuda", dtype=torch.bfloat16)
-clipmodel = 'norm' # 'norm', 'long' (my fine-tunes) - 'oai', 'orgL' (OpenAI / BeichenZhang original)
-
-if clipmodel == "long":
-    model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
-    config = CLIPConfig.from_pretrained(model_id)
-    maxtokens = 77
-if clipmodel == "norm":
-    model_id = "zer0int/CLIP-GmP-ViT-L-14"
-    config = CLIPConfig.from_pretrained(model_id)
-    maxtokens = 77
-clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
-clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
-config.text_config.max_position_embeddings = 77
 
+model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
+config = CLIPConfig.from_pretrained(model_id)
+config.text_config.max_position_embeddings = 248
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
-pipe.tokenizer_max_length = maxtokens
+pipe.tokenizer_max_length = 248
 pipe.text_encoder.dtype = torch.bfloat16
 
 
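
This hunk swaps the pipeline's stock CLIP-L text encoder for zer0int's LongCLIP fine-tune and raises the position-embedding budget from CLIP's usual 77 tokens to LongCLIP's 248, so much longer prompts survive tokenization. A hedged sanity-check sketch, assuming the clip_processor and clip_model variables from the hunk above are in scope (the printed shapes are expectations, not measured output):

import torch

long_prompt = "a tinted turn-of-the-century street scene, " * 30  # far beyond 77 tokens

# Tokenize with the swapped-in LongCLIP tokenizer; nothing should truncate below 248.
tokens = clip_processor.tokenizer(
    long_prompt,
    padding="max_length",
    max_length=248,
    truncation=True,
    return_tensors="pt",
)
print(tokens.input_ids.shape)  # expected: torch.Size([1, 248])

# The text encoder accepts all 248 positions because
# config.text_config.max_position_embeddings was raised to 248.
with torch.no_grad():
    out = clip_model.text_model(input_ids=tokens.input_ids.to(clip_model.device))
print(out.last_hidden_state.shape)  # expected: torch.Size([1, 248, 768]) for ViT-L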
@@ -91,7 +86,7 @@ def update_selection(evt: gr.SelectData, width, height):
         height,
     )
 
-@spaces.GPU(duration=70)
+@spaces.GPU()
 
 def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
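
Dropping duration=70 from the decorator makes each call fall back to ZeroGPU's default time slice (60 seconds per the spaces package documentation at the time of writing, though that default may change). A small sketch of both decorator forms (the function names are illustrative only):

import spaces

@spaces.GPU()              # default ZeroGPU slice; fine for fast Schnell-style runs
def quick_generation():
    ...

@spaces.GPU(duration=120)  # request a longer slice when one call may exceed the default
def slow_generation():
    ...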
 