AlekseyCalvin committed (verified)
Commit 8502174 · 1 Parent(s): 2a6c9f3

Update app.py

Files changed (1):
  app.py +12 -2
app.py CHANGED
@@ -13,7 +13,8 @@ from diffusers import DiffusionPipeline
 import bitsandbytes
 from diffusers.quantizers import PipelineQuantizationConfig
 from PIL import Image
-from diffusers import FluxKontextPipeline
+from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
+from diffusers import FluxKontextPipeline, DiffusionPipeline
 from diffusers.utils import load_image
 from huggingface_hub import login, hf_hub_download, HfFileSystem, ModelCard
 from huggingface_hub.utils._runtime import dump_environment_info
@@ -41,7 +42,7 @@ quant_config = PipelineQuantizationConfig(
 try:
     # Set max memory usage for ZeroGPU
     torch.cuda.set_per_process_memory_fraction(1.0)
-    torch.set_float32_matmul_precision("high")
+    torch.set_float32_matmul_precision("medium")
 except Exception as e:
     print(f"Error setting memory usage: {e}")
 
@@ -52,6 +53,15 @@ pipe = FluxKontextPipeline.from_pretrained(
     torch_dtype=torch.bfloat16
 ).to(DEVICE)
 
+model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
+config = CLIPConfig.from_pretrained(model_id)
+config.text_config.max_position_embeddings = 248
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
+pipe.tokenizer = clip_processor.tokenizer
+pipe.text_encoder = clip_model.text_model
+pipe.tokenizer_max_length = 248
+pipe.text_encoder.dtype = torch.bfloat16
 # Load LoRA data (you'll need to create this JSON file or modify to load your LoRAs)
 
 with open("flux_loras.json", "r") as file:
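For context, the second hunk's header shows that app.py builds a quant_config with PipelineQuantizationConfig just above the try block, using the PipelineQuantizationConfig import and bitsandbytes seen in the first hunk. Its constructor arguments are not part of this diff, so the sketch below is only an assumption of how such a config is commonly assembled with the bitsandbytes 4-bit backend; the backend name, kwargs, component list, and checkpoint id are illustrative, not values taken from app.py.

```python
import torch
from diffusers import FluxKontextPipeline
from diffusers.quantizers import PipelineQuantizationConfig

# Illustrative only: app.py's actual arguments lie outside this diff.
quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",        # assumed backend (bitsandbytes is imported in app.py)
    quant_kwargs={
        "load_in_4bit": True,
        "bnb_4bit_quant_type": "nf4",
        "bnb_4bit_compute_dtype": torch.bfloat16,
    },
    components_to_quantize=["transformer"],   # assumed: quantize only the FLUX transformer
)

pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",   # assumed checkpoint id; not shown in the hunk
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
```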
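The only change in the second hunk relaxes the float32 matmul precision from "high" to "medium". PyTorch's torch.set_float32_matmul_precision accepts "highest" (full float32 math), "high" (TF32-class kernels), and "medium" (bfloat16 internal math), so the new setting trades a little numerical accuracy for faster matmuls on recent GPUs. A minimal sketch of the guarded pattern app.py uses:

```python
import torch

try:
    # Same guarded setup as app.py: these calls can fail on CPU-only or
    # restricted (e.g. ZeroGPU) runtimes, so failures are only logged.
    torch.cuda.set_per_process_memory_fraction(1.0)  # let this process use the whole GPU
    torch.set_float32_matmul_precision("medium")     # bf16 internal math for fp32 matmuls
except Exception as e:
    print(f"Error setting memory usage: {e}")
```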
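The third hunk swaps the pipeline's stock CLIP-L tokenizer and text encoder for zer0int/LongCLIP-GmP-ViT-L-14, raising the text context from CLIP's usual 77 tokens to LongCLIP's 248. Below is a minimal, self-contained sketch of that swap; the FLUX.1 Kontext checkpoint id is an assumption (the from_pretrained arguments are mostly outside this diff), and the attribute assignments follow the commit's approach rather than an officially documented API.

```python
import torch
from diffusers import FluxKontextPipeline
from transformers import CLIPConfig, CLIPModel, CLIPProcessor

# Assumed checkpoint id; app.py's actual id is not visible in this hunk.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16,
)

# LongCLIP ships 248 position embeddings, so the config is widened before loading
# and ignore_mismatched_sizes=True absorbs the shape difference vs. stock CLIP-L.
model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
config.text_config.max_position_embeddings = 248

clip_model = CLIPModel.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    config=config,
    ignore_mismatched_sizes=True,
)
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)

# As in the commit: LongCLIP's tokenizer and text transformer replace the
# pipeline's CLIP components, and the tokenizer limit is raised to match.
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
pipe.tokenizer_max_length = 248
```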