DAMHelper / app.py
enricorampazzo's picture
final command line implementation
d005419
raw
history blame
2.08 kB
from pathlib import Path
from llm_manager.llm_parser import LlmParser
from prompts.prompts_manager import PromptsManager
from repository.repository import get_repository
from repository.repository_abc import ModelRoles, Model
from form.form import build_form_data_from_answers, write_pdf_form
def check_for_missing_answers(parsed_questions: dict[int, str]) -> list[int]:
    """Return the ids of questions whose parsed answer is still None."""
    return [question_id for question_id, answer in parsed_questions.items() if answer is None]
def ask_again(missing_questions: list[int], user_questions: list[str], parsed_questions: dict[int, str]):
    """Prompt the user interactively for each unanswered question and store the replies in parsed_questions (mutated in place)."""
    for question_id in missing_questions:
        prompt = f"I could not find the answer to this question: {user_questions[question_id].lower()}"
        parsed_questions[question_id] = input(prompt)
if __name__ == '__main__':
    # Command-line flow: collect a free-form description from the user, have the
    # LLM map it onto the known questions, fill in any gaps interactively, then
    # classify the work and write the filled PDF form.
    prompts_manager = PromptsManager()
    # Hoist the join out of the f-string: a backslash (or nested same-type
    # quotes) inside an f-string expression requires Python 3.12+ (PEP 701);
    # this form produces identical output on older interpreters too.
    questions_list = "\n".join(prompts_manager.questions)
    user_prompt = input(f"Please describe what you need to do. To get the best results "
                        f"try to answer all the following questions:\n{questions_list}\n\n>")
    # Back the session with the Intel NPU repository; the conversation is
    # logged to llm_log.txt in the working directory.
    repository = get_repository("intel_npu", Model("meta-llama/Meta-Llama-3-8B-Instruct",
                                                   ModelRoles("system", "user", "assistant")),
                                prompts_manager.system_prompt, Path("llm_log.txt"))
    repository.init()
    # Ask the model to extract an answer for each known question from the
    # user's free-form prompt; unanswered questions come back as None.
    answer = repository.send_prompt(prompts_manager.verify_user_input_prompt(user_prompt))
    answers = LlmParser.parse_verification_prompt_answers(answer['content'])
    # Keep asking the user directly until every question has an answer.
    missing_answers = check_for_missing_answers(answers)
    while missing_answers:
        ask_again(missing_answers, prompts_manager.questions, answers)
        missing_answers = check_for_missing_answers(answers)
    # answers[1] is fed to the category prompt — presumably the answer that
    # describes the work to be done; verify against PromptsManager.questions.
    answer = repository.send_prompt(prompts_manager.get_work_category(answers[1]))
    categories = LlmParser.parse_get_categories_answer(answer['content'])
    # str(Path(...)) instead of an f-string with nested double quotes
    # (3.12+-only syntax); same resulting path string: <this file>/../signature.png
    form_data = build_form_data_from_answers(answers, categories, str(Path(__file__, "..", "signature.png")))
    write_pdf_form(form_data, Path("signed_form1.pdf"))