gchhablani committed on
Commit
50811dd
1 Parent(s): bea24f7

Remove persistent caching

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -28,7 +28,7 @@ def load_model(ckpt):
28
 
29
  tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
30
 
31
- @st.cache(persist=True)
32
  def generate_sequence(pixel_values, num_beams, temperature, top_p):
33
  output_ids = state.model.generate(input_ids=pixel_values, max_length=64, num_beams=num_beams, temperature=temperature, top_p = top_p)
34
  print(output_ids)
 
28
 
29
  tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
30
 
31
+ @st.cache
32
  def generate_sequence(pixel_values, num_beams, temperature, top_p):
33
  output_ids = state.model.generate(input_ids=pixel_values, max_length=64, num_beams=num_beams, temperature=temperature, top_p = top_p)
34
  print(output_ids)