Jyothirmai committed on
Commit
c9254be
β€’
1 Parent(s): 9cda1d3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -5
app.py CHANGED
@@ -10,8 +10,6 @@ import cnnrnn
10
  from build_vocab import Vocabulary
11
 
12
 
13
-
14
-
15
  # Caption generation functions
16
  def generate_caption_clipgpt(image, max_tokens, temperature):
17
  caption = clipGPT.generate_caption_clipgpt(image, max_tokens, temperature)
@@ -35,12 +33,10 @@ def generate_caption_cnnrnn(image):
35
 
36
 
37
  with gr.Row():
38
-
39
  image = gr.Image(label="Upload Chest X-ray", type="pil")
40
 
41
 
42
  with gr.Row():
43
-
44
  with gr.Column(): # Column for dropdowns and model choice
45
  max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
46
  temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
@@ -59,7 +55,7 @@ def predict(img, model_name, max_tokens, temperature):
59
  elif model_name == "ViT-CoAttention":
60
  return generate_caption_vitCoAtt(img)
61
  elif model_name == "Baseline Model CNN-RNN":
62
- print(img)
63
  return generate_caption_cnnrnn(img)
64
  else:
65
  return "Caption generation for this model is not yet implemented."
@@ -68,6 +64,7 @@ def predict(img, model_name, max_tokens, temperature):
68
 
69
  examples = [[f"example{i}.jpg"] for i in range(1,4)]
70
 
 
71
  description= "You can generate captions by uploading an X-Ray and selecting a model of your choice below. Please select the number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models"
72
  title = "MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–"
73
 
 
10
  from build_vocab import Vocabulary
11
 
12
 
 
 
13
  # Caption generation functions
14
  def generate_caption_clipgpt(image, max_tokens, temperature):
15
  caption = clipGPT.generate_caption_clipgpt(image, max_tokens, temperature)
 
33
 
34
 
35
  with gr.Row():
 
36
  image = gr.Image(label="Upload Chest X-ray", type="pil")
37
 
38
 
39
  with gr.Row():
 
40
  with gr.Column(): # Column for dropdowns and model choice
41
  max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
42
  temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
 
55
  elif model_name == "ViT-CoAttention":
56
  return generate_caption_vitCoAtt(img)
57
  elif model_name == "Baseline Model CNN-RNN":
58
+ print(img.name)
59
  return generate_caption_cnnrnn(img)
60
  else:
61
  return "Caption generation for this model is not yet implemented."
 
64
 
65
  examples = [[f"example{i}.jpg"] for i in range(1,4)]
66
 
67
+
68
  description= "You can generate captions by uploading an X-Ray and selecting a model of your choice below. Please select the number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models"
69
  title = "MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–"
70