GrayShine committed
Commit
0723018
1 Parent(s): 4f488cf

Update app.py

Files changed (1)
app.py +2 -3
app.py CHANGED
@@ -178,7 +178,7 @@ def init_model():
     args.latent_h = latent_h
     args.latent_w = latent_w
     print('loading model')
-    model = get_models(True, args).to(device)
+    model = get_models(args).to(device)
     model = tca_transform_model(model).to(device)
     model = ip_transform_model(model).to(device)
     if args.use_compile:
@@ -196,8 +196,7 @@ def init_model():
     model.eval() # important!
     pretrained_model_path = args.pretrained_model_path
     vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").to(device)
-    text_encoder = TextEmbedder(tokenizer_path=pretrained_model_path + "tokenizer",
-                                encoder_path=pretrained_model_path + "text_encoder").to(device)
+    text_encoder = TextEmbedder(pretrained_model_path).to(device)
     image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.image_encoder_path).to(device)
     clip_image_processor = CLIPImageProcessor()
     if args.use_fp16:
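Note on the two call-site changes (a reading of the diff, not stated in the commit message): get_models() now takes args alone, dropping the leading boolean flag, and TextEmbedder is constructed from the checkpoint root rather than two hand-concatenated subpaths. The old concatenation (pretrained_model_path + "tokenizer") would also yield a wrong path whenever pretrained_model_path lacked a trailing slash, which the single-argument form sidesteps. Below is a minimal sketch of what the new TextEmbedder signature could look like, assuming it wraps a CLIP tokenizer/text-encoder pair loaded via the same subfolder convention the VAE line above already uses; the class internals are not part of this diff, so everything here is a hypothetical reconstruction.

    import torch
    import torch.nn as nn
    from transformers import CLIPTokenizer, CLIPTextModel

    class TextEmbedder(nn.Module):
        # Hypothetical reconstruction: one checkpoint root, with the
        # tokenizer/ and text_encoder/ subfolders resolved internally,
        # mirroring AutoencoderKL.from_pretrained(..., subfolder="vae").
        def __init__(self, pretrained_model_path):
            super().__init__()
            self.tokenizer = CLIPTokenizer.from_pretrained(
                pretrained_model_path, subfolder="tokenizer")
            self.text_encoder = CLIPTextModel.from_pretrained(
                pretrained_model_path, subfolder="text_encoder")

        @torch.no_grad()
        def forward(self, prompts):
            # Tokenize the prompts and return per-token text embeddings.
            tokens = self.tokenizer(prompts, padding="max_length",
                                    truncation=True,
                                    max_length=self.tokenizer.model_max_length,
                                    return_tensors="pt")
            device = next(self.text_encoder.parameters()).device
            return self.text_encoder(tokens.input_ids.to(device)).last_hidden_state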