Update utils.py
utils.py (changed)
@@ -286,7 +286,7 @@ def grade_documents_direct(prompt, documents):
 
     # Data model
     class grade(BaseModel):
-        #Binary score for relevance check.
+        #Binary score for relevance check.
         binary_score: str = Field(description="Relevanz Bewertung 'ja' oder 'nein'")
 
     # LLM
@@ -326,10 +326,12 @@ def grade_documents_direct(prompt, documents):
 
     # Chain
     chain = prompt_gesamt | llm_with_tool | parser_tool
-
+    print("test+++++++++++++++")
+    print(prompt)
     # Score
     filtered_docs = []
     for d in documents:
+        print(d.page_content)
         score = chain.invoke({"question": prompt, "context": d.page_content})
         grade = score[0].binary_score
         if grade == "ja":
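
For reference, below is a minimal, self-contained sketch of how a grading chain like the one touched by this diff is typically wired up in LangChain. The names grade, prompt_gesamt, llm_with_tool and parser_tool mirror the diff; the model choice (ChatOpenAI), the prompt wording, the imports and the early-exit handling of an empty tool-call list are assumptions, not taken from the repository.

# Sketch only: model, prompt text and imports are assumptions, not the repo's actual setup.
from pydantic import BaseModel, Field
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_openai import ChatOpenAI


def grade_documents_direct(prompt, documents):
    # Data model: the tool schema the LLM must fill in.
    class grade(BaseModel):
        # Binary score for relevance check.
        binary_score: str = Field(description="Relevanz Bewertung 'ja' oder 'nein'")

    # LLM with the grade schema bound as a tool (ChatOpenAI is an assumption).
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    llm_with_tool = llm.bind_tools([grade])
    parser_tool = PydanticToolsParser(tools=[grade])

    # Hypothetical prompt wording; the real prompt_gesamt lives elsewhere in utils.py.
    prompt_gesamt = ChatPromptTemplate.from_messages([
        ("system", "Assess whether the context is relevant to the question. "
                   "Answer only with 'ja' or 'nein'."),
        ("human", "Frage: {question}\n\nKontext: {context}"),
    ])

    # Chain: prompt -> tool-calling LLM -> pydantic parser (returns a list of grade objects).
    chain = prompt_gesamt | llm_with_tool | parser_tool

    # Score each document and keep only the ones graded as relevant.
    filtered_docs = []
    for d in documents:
        score = chain.invoke({"question": prompt, "context": d.page_content})
        if score and score[0].binary_score == "ja":
            filtered_docs.append(d)
    return filtered_docs

Binding the pydantic schema as a tool forces the LLM into structured output, so the relevance decision can be read with a plain string comparison on binary_score instead of parsing free text.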