Csplk committed on
Commit
471f9af
β€’
1 Parent(s): 32323f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -7
app.py CHANGED
@@ -38,14 +38,18 @@ def answer_questions(image_tuples, prompt_text):
38
  tokenizer=tokenizer,
39
  )
40
  answers.append(image_answers)
41
-
42
  data = []
43
  for i in range(len(image_tuples)):
44
  image_name = f"image{i+1}"
45
  image_answers = [answer[i] for answer in answers]
46
  print(f"image{i+1}_answers \n {image_answers} \n")
47
  data.append([image_name] + image_answers)
48
-
 
 
 
 
49
  result = {'headers': prompts, 'data': data}
50
  return result
51
  '''
@@ -63,8 +67,9 @@ def answer_questions(image_tuples, prompt_text):
63
 
64
  with gr.Blocks() as demo:
65
  gr.Markdown("# moondream2 unofficial batch processing demo")
66
- gr.Markdown("1. Select images\n2. Enter prompts (one prompt for each image provided) separated by commas. Ex: Describe this image, What is in this image?\n\n")
67
- gr.Markdown("*Tested and Running on free CPU space tier currently so results may take a bit to process compared to using GPU space hardware*")
 
68
  gr.Markdown("## πŸŒ” moondream2\nA tiny vision language model. [GitHub](https://github.com/vikhyatk/moondream)")
69
  with gr.Row():
70
  img = gr.Gallery(label="Upload Images", type="pil")
@@ -72,8 +77,8 @@ with gr.Blocks() as demo:
72
  prompt = gr.Textbox(label="Input Prompts", placeholder="Enter prompts (one prompt for each image provided) separated by commas. Ex: Describe this image, What is in this image?", lines=8)
73
  with gr.Row():
74
  submit = gr.Button("Submit")
75
- #output = gr.TextArea(label="Responses", lines=30)
76
- output = gr.Dataframw(type="array",wrap=True)
77
- submit.click(answer_questions, [img, prompt], output)
78
 
79
  demo.queue().launch()
 
38
  tokenizer=tokenizer,
39
  )
40
  answers.append(image_answers)
41
+
42
  data = []
43
  for i in range(len(image_tuples)):
44
  image_name = f"image{i+1}"
45
  image_answers = [answer[i] for answer in answers]
46
  print(f"image{i+1}_answers \n {image_answers} \n")
47
  data.append([image_name] + image_answers)
48
+
49
+ for question, answer in zip(prompts, answers):
50
+ Q_and_A += (f"Q: {question}\nA: {answer}\n\n")
51
+ print(f"\n\n{Q_and_A}\n\n")
52
+
53
  result = {'headers': prompts, 'data': data}
54
  return result
55
  '''
 
67
 
68
  with gr.Blocks() as demo:
69
  gr.Markdown("# moondream2 unofficial batch processing demo")
70
+ gr.Markdown("1. Select images\n2. Enter one or more prompts separated by commas. Ex: Describe this image, What is in this image?\n\n")
71
+ gr.Markdown("**Currently each image will be sent as a batch with the prompts thus asking each prompt on each image**")
72
+ gr.Markdown("*Running on free CPU space tier currently so results may take a bit to process compared to duplicating space and using GPU space hardware*")
73
  gr.Markdown("## πŸŒ” moondream2\nA tiny vision language model. [GitHub](https://github.com/vikhyatk/moondream)")
74
  with gr.Row():
75
  img = gr.Gallery(label="Upload Images", type="pil")
 
77
  prompt = gr.Textbox(label="Input Prompts", placeholder="Enter prompts (one prompt for each image provided) separated by commas. Ex: Describe this image, What is in this image?", lines=8)
78
  with gr.Row():
79
  submit = gr.Button("Submit")
80
+ output = gr.TextArea(label="Questions and Answers", lines=30)
81
+ output2 = gr.Dataframe(label="Structured Dataframe", type="array",wrap=True)
82
+ submit.click(answer_questions, [img, prompt], output, output2)
83
 
84
  demo.queue().launch()