zhiqiulin commited on
Commit
8e64bf0
1 Parent(s): 7aa2a58

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import gradio as gr
2
  import spaces
3
  import torch
4
-
5
  torch.jit.script = lambda f: f # Avoid script error in lambda
6
 
7
  from t2v_metrics import VQAScore, list_all_vqascore_models
@@ -9,9 +8,8 @@ from t2v_metrics import VQAScore, list_all_vqascore_models
9
  # Global model variable, but do not initialize or move to CUDA here
10
  model_pipe = VQAScore(model="clip-flant5-xl", device="cuda") # our recommended scoring model
11
 
12
- @spaces.GPU
13
  def generate(model_name, image, text):
14
-
15
  print(list_all_vqascore_models()) # Debug: List available models
16
  print("Image:", image) # Debug: Print image path
17
  print("Text:", text) # Debug: Print text input
@@ -25,10 +23,12 @@ def generate(model_name, image, text):
25
 
26
  return result # Return the result
27
 
28
- iface = gr.Interface(
29
  fn=generate, # function to call
30
  inputs=[gr.Dropdown(["clip-flant5-xl", "clip-flant5-xxl"], label="Model Name"), gr.Image(type="filepath"), gr.Textbox(label="Prompt")], # define the types of inputs
31
  outputs="number", # define the type of output
32
  title="VQAScore", # title of the app
33
  description="This model evaluates the similarity between an image and a text prompt."
34
- ).launch()
 
 
 
1
  import gradio as gr
2
  import spaces
3
  import torch
 
4
# Monkeypatch: replace torch.jit.script with an identity function so that any
# decorated callable (including lambdas, which TorchScript cannot compile) is
# returned unchanged instead of raising a scripting error at import time.
torch.jit.script = lambda fn: fn
5
 
6
  from t2v_metrics import VQAScore, list_all_vqascore_models
 
8
# Global scoring pipeline, instantiated once at module import. The recommended
# VQAScore backbone is clip-flant5-xl; device="cuda" targets the GPU the Space
# allocates for this process.
model_pipe = VQAScore(device="cuda", model="clip-flant5-xl")
10
 
11
+ @spaces.GPU(duration = 300)
12
  def generate(model_name, image, text):
 
13
  print(list_all_vqascore_models()) # Debug: List available models
14
  print("Image:", image) # Debug: Print image path
15
  print("Text:", text) # Debug: Print text input
 
23
 
24
  return result # Return the result
25
 
26
# Assemble the Gradio UI: a model selector, an image upload (passed to the
# scoring function as a file path), and a free-text prompt box, all wired to
# generate(), which returns a single numeric similarity score.
model_choice = gr.Dropdown(["clip-flant5-xl", "clip-flant5-xxl"], label="Model Name")
image_upload = gr.Image(type="filepath")
prompt_box = gr.Textbox(label="Prompt")

demo = gr.Interface(
    fn=generate,  # scoring callback defined above
    inputs=[model_choice, image_upload, prompt_box],
    outputs="number",  # one similarity score per (image, prompt) pair
    title="VQAScore",
    description="This model evaluates the similarity between an image and a text prompt.",
)

demo.launch()