Jyothirmai committed
Commit aafac25 · verified · 1 Parent(s): 6ae0110

Update app.py

Files changed (1)
  1. app.py +15 -9
app.py CHANGED
@@ -11,11 +11,11 @@ from build_vocab import Vocabulary
 
 
 # Caption generation functions
-def generate_caption_clipgpt(image):
+def generate_caption_clipgpt(image, max_tokens, temperature):
     caption = clipGPT.generate_caption_clipgpt(image)
     return caption
 
-def generate_caption_vitgpt(image):
+def generate_caption_vitgpt(image, max_tokens, temperature):
     caption = vitGPT.generate_caption(image)
     return caption
 
@@ -26,11 +26,17 @@ def generate_caption_vitCoAtt(image):
 
 with gr.Blocks() as demo:
 
-
     gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
     gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
 
-
+    with gr.Row():
+        # ... (your existing image upload components)
+
+        with gr.Column(): # Column for dropdowns and model choice
+            max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
+            temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
+            model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
+
     with gr.Row():
         sample_images = [
             'https://imgur.com/W1pIr9b',
@@ -56,18 +62,18 @@ with gr.Blocks() as demo:
 
     def predict(img, model_name):
         if model_name == "CLIP-GPT2":
-            return generate_caption_clipgpt(img)
+            return generate_caption_clipgpt(img, max_tokens, temperature)
         elif model_name == "ViT-GPT2":
-            return generate_caption_vitgpt(img)
+            return generate_caption_vitgpt(img, max_tokens, temperature)
         elif model_name == "ViT-CoAttention":
             return generate_caption_vitCoAtt(img)
         else:
             return "Caption generation for this model is not yet implemented."
 
 
-    # Event handlers
-    generate_button.click(predict, [image, model_choice], caption) # Trigger prediction on button click
-    sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption) # Handle sample images
+    # Event handlers
+    generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption)
+    sample_images_gallery.change(predict, [sample_images_gallery, model_choice, max_tokens, temperature], caption)
 
 
 demo.launch()
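
Note: as committed, predict(img, model_name) still accepts only two arguments, while both event handlers now list four input components, and the max_tokens/temperature names used inside predict resolve to the Gradio component objects rather than their values (Gradio passes the values of the listed inputs to the callback positionally). A minimal sketch of the follow-up that would make the new controls effective, assuming the clipGPT/vitGPT wrappers accept these keyword arguments (their real signatures are not shown in this diff):

# Sketch only -- not part of commit aafac25.
def generate_caption_clipgpt(image, max_tokens, temperature):
    # Assumed keyword names; the underlying clipGPT API may differ.
    return clipGPT.generate_caption_clipgpt(image, max_tokens=max_tokens,
                                            temperature=temperature)

def generate_caption_vitgpt(image, max_tokens, temperature):
    # Assumed keyword names; the underlying vitGPT API may differ.
    return vitGPT.generate_caption(image, max_tokens=max_tokens,
                                   temperature=temperature)

def predict(img, model_name, max_tokens, temperature):
    # Parameters line up with the inputs [image, model_choice, max_tokens, temperature]
    # passed by the event handlers, so the component values arrive here directly.
    if model_name == "CLIP-GPT2":
        return generate_caption_clipgpt(img, max_tokens, temperature)
    elif model_name == "ViT-GPT2":
        return generate_caption_vitgpt(img, max_tokens, temperature)
    elif model_name == "ViT-CoAttention":
        return generate_caption_vitCoAtt(img)
    else:
        return "Caption generation for this model is not yet implemented."

With this signature, the committed handler generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption) works unchanged.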