AlekseyCalvin committed
Commit f779426 · verified · 1 Parent(s): 317f109

Update app.py

Files changed (1): app.py  +3 −3
app.py CHANGED
@@ -55,12 +55,12 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
 config = CLIPConfig.from_pretrained(model_id)
-config.text_config.max_position_embeddings = 248
+config.text_config.max_position_embeddings = 77
 clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
-clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=77)
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
-pipe.tokenizer_max_length = 248
+pipe.tokenizer_max_length = 77
 pipe.text_encoder.dtype = torch.bfloat16
 
 #clipmodel = 'norm'
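
The change reverts the prompt context length from LongCLIP's extended 248 tokens back to the standard CLIP limit of 77 tokens in all three places it is set: the text-encoder config, the processor's padding length, and the pipeline's tokenizer_max_length. Below is a minimal sketch of how the patched lines would sit in a full pipeline setup; the FluxPipeline class and the FLUX.1-schnell checkpoint are illustrative assumptions and are not taken from this commit.

# Minimal sketch of the patched section in context. The pipeline class and
# checkpoint name are assumptions for illustration; the commit itself only
# touches the LongCLIP/CLIP configuration lines below.
import torch
from diffusers import FluxPipeline
from transformers import CLIPConfig, CLIPModel, CLIPProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed base pipeline; not part of this commit.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)

model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
# This commit changes the value from 248 (LongCLIP) to 77 (standard CLIP).
config.text_config.max_position_embeddings = 77
clip_model = CLIPModel.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    config=config,
    ignore_mismatched_sizes=True,
)
clip_processor = CLIPProcessor.from_pretrained(
    model_id, padding="max_length", max_length=77
)

# Swap the LongCLIP tokenizer and text encoder into the pipeline.
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
pipe.tokenizer_max_length = 77
pipe.text_encoder.dtype = torch.bfloat16
pipe.to(device)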