Update app.py
app.py CHANGED
@@ -23,9 +23,9 @@ def answer_questions(image_tuples, prompt_text):
     result = ""
     Q_and_A = ""
     prompts = [p.strip() for p in prompt_text.split(',')] # Splitting and cleaning prompts
-    print(f"prompts: {prompts}\n")
     image_embeds = [img[0] for img in image_tuples if img[0] is not None] # Extracting images from tuples, ignoring None
 
+    print(f"\nprompts: {prompts}\n\n")
     answers = []
     for prompt in prompts:
         image_answers = moondream.batch_answer(
@@ -34,19 +34,16 @@ def answer_questions(image_tuples, prompt_text):
             tokenizer=tokenizer,
         )
         answers.append(image_answers)
-
-    data = []
-    for i in range(len(image_tuples)):
-        image_name = f"image{i+1}"
-        image_answers = [answer[i] for answer in answers]
-        print(f"image{i+1}_answers \n {image_answers} \n")
-        data.append([image_name] + image_answers)
-
-    for question, answer in zip(prompts, answers):
-        Q_and_A += (f"Q: {question}\nA: {answer}\n\n")
-    print(f"\n\n{Q_and_A}\n\n")
 
-
+    for i, prompt in enumerate(prompts):
+        Q_and_A += f"Q: {prompt}\n\n"
+        for j, image_tuple in enumerate(image_tuples):
+            image_name = f"image{j+1}"
+            answer_text = answers[i][j] # Retrieve the answer for the i-th prompt for the j-th image
+            Q_and_A += f"{image_name} A:\n{answer_text}\n\n"
+
+    result = {'headers': prompts, 'data': answers} # Updated result handling
+    print(f"result\n{result}\n\nQ_and_A\n{Q_and_A}\n\n")
     return Q_and_A, result
 
 with gr.Blocks() as demo:
@@ -63,6 +60,6 @@ with gr.Blocks() as demo:
     submit = gr.Button("Submit")
     output = gr.TextArea(label="Questions and Answers", lines=30)
     output2 = gr.Dataframe(label="Structured Dataframe", type="array",wrap=True)
-    submit.click(answer_questions, [img, prompt], output, output2)
+    submit.click(answer_questions, [img, prompt], [output, output2])
 
 demo.queue().launch()
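
For reference, here is a minimal runnable sketch of the answer_questions flow as reworked by this commit. The moondream.batch_answer call is replaced by a stub so the Q&A text assembly and the Dataframe payload can be exercised without the model; the stub (fake_batch_answer), the sketch function name, and the sample inputs are illustrative assumptions, not part of app.py.

def fake_batch_answer(images, prompt):
    # Stand-in for moondream.batch_answer: one answer string per image.
    return [f"answer to '{prompt}' for image {k + 1}" for k in range(len(images))]

def answer_questions_sketch(image_tuples, prompt_text):
    Q_and_A = ""
    prompts = [p.strip() for p in prompt_text.split(',')]  # split and clean prompts
    image_embeds = [img[0] for img in image_tuples if img[0] is not None]

    # One list of per-image answers per prompt, as in the commit.
    answers = []
    for prompt in prompts:
        answers.append(fake_batch_answer(image_embeds, prompt))

    # Assemble the text report: one "Q:" block per prompt, one "A:" entry per image.
    for i, prompt in enumerate(prompts):
        Q_and_A += f"Q: {prompt}\n\n"
        for j, _ in enumerate(image_tuples):
            Q_and_A += f"image{j+1} A:\n{answers[i][j]}\n\n"

    # Payload for gr.Dataframe: prompts as headers, per-prompt answer lists as rows.
    result = {'headers': prompts, 'data': answers}
    return Q_and_A, result

# Example run with two dummy "images" and two comma-separated prompts.
text, table = answer_questions_sketch([("img_a",), ("img_b",)], "What is shown?, How many objects?")
print(text)
print(table)

The last hunk matters because answer_questions now returns two values: wrapping the outputs as [output, output2] in submit.click lets Gradio route the returned Q_and_A string to the TextArea and the result dict to the Dataframe in order.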