AlekseyCalvin committed on
Commit
1cb1121
·
verified ·
1 Parent(s): 9ca308b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -44,7 +44,7 @@ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda
44
  #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
45
  torch.cuda.empty_cache()
46
 
47
- clipmodel = 'norm'
48
  if clipmodel == "long":
49
  model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
50
  config = CLIPConfig.from_pretrained(model_id)
@@ -55,13 +55,13 @@ if clipmodel == "norm":
55
  maxtokens = 77
56
  clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
57
  clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
58
- t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
59
 
60
  pipe.tokenizer = clip_processor.tokenizer
61
  pipe.text_encoder = clip_model.text_model
62
  pipe.tokenizer_max_length = maxtokens
63
  pipe.text_encoder.dtype = torch.bfloat16
64
- pipe.text_encoder_2 = t5.text_model
65
 
66
  MAX_SEED = 2**32-1
67
 
 
44
  #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
45
  torch.cuda.empty_cache()
46
 
47
+ clipmodel = 'long'
48
  if clipmodel == "long":
49
  model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
50
  config = CLIPConfig.from_pretrained(model_id)
 
55
  maxtokens = 77
56
  clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
57
  clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
58
+ #t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
59
 
60
  pipe.tokenizer = clip_processor.tokenizer
61
  pipe.text_encoder = clip_model.text_model
62
  pipe.tokenizer_max_length = maxtokens
63
  pipe.text_encoder.dtype = torch.bfloat16
64
+ #pipe.text_encoder_2 = t5.text_model
65
 
66
  MAX_SEED = 2**32-1
67