VaianiLorenzo committed on
Commit
998d998
1 Parent(s): 445116f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -306,13 +306,15 @@ def draw_audio(
306
 
307
  image = Image.open("data/logo.png")
308
  st.image(image, use_column_width="always")
 
 
309
 
310
  if 'model' not in st.session_state:
311
  #with st.spinner('We are orginizing your traks...'):
312
  text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
313
  vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True)
314
  tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
315
- model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)
316
  model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH+"/*.jpeg")[:5000])
317
  st.session_state["model"] = model
318
  #st.session_state['model'] = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)
 
306
 
307
  image = Image.open("data/logo.png")
308
  st.image(image, use_column_width="always")
309
+
310
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
311
 
312
  if 'model' not in st.session_state:
313
  #with st.spinner('We are orginizing your traks...'):
314
  text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
315
  vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True)
316
  tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
317
+ model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
318
  model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH+"/*.jpeg")[:5000])
319
  st.session_state["model"] = model
320
  #st.session_state['model'] = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)