vinid committed
Commit d1b8523
1 Parent(s): 1948bbe

Upload app.py

Files changed (1)
  1. app.py +12 -3
app.py CHANGED
@@ -4,8 +4,11 @@ from plip_support import embed_text
 import numpy as np
 from PIL import Image
 import requests
+import transformers
+import tokenizers
 from io import BytesIO
 import streamlit as st
+from transformers import CLIPModel
 import clip
 import torch
 from transformers import (
@@ -27,14 +30,20 @@ def embed_texts(model, texts, processor):
     )
     return embeddings
 
-@st.cache_resource
+@st.cache
 def load_embeddings(embeddings_path):
     print("loading embeddings")
     return np.load(embeddings_path)
 
-@st.cache_resource
+@st.cache(
+    hash_funcs={
+        torch.nn.parameter.Parameter: lambda _: None,
+        tokenizers.Tokenizer: lambda _: None,
+        tokenizers.AddedToken: lambda _: None
+    }
+)
 def load_path_clip():
-    model = VisionTextDualEncoderModel.from_pretrained("vinid/plip")
+    model = CLIPModel.from_pretrained("vinid/plip")
     processor = AutoProcessor.from_pretrained("vinid/plip")
     return model, processor
 
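The decorator swap is the substance of this commit: the legacy st.cache decorator tries to hash the objects it caches, and torch parameters and tokenizers objects are not hashable, so hash_funcs maps those types to lambda _: None, telling Streamlit to ignore them when computing cache keys. For reference, a minimal sketch of how the two cached loaders could be combined for text-to-image retrieval; the embeddings file path, the query string, and the top-5 ranking are illustrative assumptions, not part of this commit:

import numpy as np
import torch
from transformers import AutoProcessor, CLIPModel

# Same model/processor pair that load_path_clip() now returns.
model = CLIPModel.from_pretrained("vinid/plip")
processor = AutoProcessor.from_pretrained("vinid/plip")

# "embeddings.npy" is a hypothetical path; load_embeddings() would be
# called with the app's real precomputed image-embedding file, assumed
# here to be L2-normalized, one row per image.
image_embeddings = np.load("embeddings.npy")

# Embed a text query into the shared image-text space.
inputs = processor(text=["a histopathology image of a tumor"],
                   return_tensors="pt", padding=True)
with torch.no_grad():
    text_features = model.get_text_features(**inputs)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)

# Cosine similarity against every image, then the five best matches.
scores = image_embeddings @ text_features.squeeze(0).numpy()
print(np.argsort(-scores)[:5])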