Spaces: Running on Zero
Commit · 798ebc4
Parent(s): 5e2794d
format prompts and add debug log

utils/models.py CHANGED (+26 -9)

@@ -39,22 +39,37 @@ def generate_summaries(example, model_a_name, model_b_name):
     """
     if generation_interrupt.is_set():
         return "", ""
-
+
     context_text = ""
     context_parts = []
-
-
+
+    if "full_contexts" in example and example["full_contexts"]:
+        for i, ctx in enumerate(example["full_contexts"]):
+            content = ""
+
+            # Extract content from either dict or string
             if isinstance(ctx, dict) and "content" in ctx:
-
-
+                content = ctx["content"]
+            elif isinstance(ctx, str):
+                content = ctx
+
+            # Add document number if not already present
+            if not content.strip().startswith("Document"):
+                content = f"Document {i+1}:\n{content}"
+
+            context_parts.append(content)
+
+        context_text = "\n\n".join(context_parts)
     else:
-
-
+        # Provide a graceful fallback instead of raising an error
+        print("Warning: No full context found in the example, using empty context")
+        context_text = ""
+
     question = example.get("question", "")
-
+
     if generation_interrupt.is_set():
         return "", ""
-
+
     # Run model A
     summary_a = run_inference(models[model_a_name], context_text, question)

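
For reference, the context-assembly logic added above can be exercised on its own. The sketch below mirrors it as a standalone helper, assuming a toy input dict; the name build_context_text and the sample data are illustrative and not part of utils/models.py.

# Standalone sketch of the context assembly added in generate_summaries.
# The helper name and the sample data below are illustrative only.
def build_context_text(example):
    context_parts = []
    if "full_contexts" in example and example["full_contexts"]:
        for i, ctx in enumerate(example["full_contexts"]):
            # Accept either {"content": ...} dicts or plain strings.
            if isinstance(ctx, dict) and "content" in ctx:
                content = ctx["content"]
            elif isinstance(ctx, str):
                content = ctx
            else:
                content = ""
            # Prefix a document number unless one is already present.
            if not content.strip().startswith("Document"):
                content = f"Document {i+1}:\n{content}"
            context_parts.append(content)
        return "\n\n".join(context_parts)
    # Graceful fallback mirroring the warning branch above.
    print("Warning: No full context found in the example, using empty context")
    return ""

sample = {
    "full_contexts": [
        {"content": "First retrieved passage."},
        "Second passage passed as a plain string.",
    ]
}
print(build_context_text(sample))
# Document 1:
# First retrieved passage.
#
# Document 2:
# Second passage passed as a plain string.

Contexts that already begin with "Document" keep their existing label; everything else is numbered by its position in full_contexts.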

@@ -105,6 +120,8 @@ def run_inference(model_name, context, question):
     )

     text_input = format_rag_prompt(question, context, accepts_sys)
+    print(type(text_input))
+    print(text_input)

     # Check interrupt before generation
     if generation_interrupt.is_set():
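
The second hunk adds temporary debug output: it prints the type and contents of the prompt returned by format_rag_prompt, presumably to confirm whether it is a plain string or a chat-message list. If this debug log were kept, one option would be to route it through Python's standard logging module so it can be silenced by log level. This is a sketch only, not what the commit does; the logger name and the sample call are assumptions.

# Sketch only: same debug information via the standard logging module.
# The logger name "utils.models" is an assumption, not taken from the repo.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("utils.models")

def log_prompt(text_input):
    # Record the prompt's type and contents at DEBUG level.
    logger.debug("prompt type: %s", type(text_input))
    logger.debug("prompt contents: %s", text_input)

log_prompt([{"role": "user", "content": "example prompt"}])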