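""" Example: ask a 'base salary' question of each employment contract in a local folder, and
    fact check each model response against the source text of the contract. """
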
from llmware.prompts import Prompt
from llmware.configs import LLMWareConfig

import os
import time


def contract_analysis_w_fact_checking(model_name):
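
    # point this path at a local folder of contract documents to analyze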
    contracts_path = "/home/ubuntu/contracts/"

    print("\nupdate: loading model - ", model_name)

    prompter = Prompt().load_model(model_name)
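
    # the 'topic' is used to filter each contract for relevant passages, and the
    # 'prompt' is the question that will be posed to the model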
    research = {"topic": "base salary", "prompt": "What is the executive's base salary?"}

    t0 = time.time()

    for i, contract in enumerate(os.listdir(contracts_path)):

        print("\nAnalyzing Contract - ", str(i+1), contract)
        print("Question: ", research["prompt"])
        source = prompter.add_source_document(contracts_path, contract, query=research["topic"])
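
        # call the model - the source material is automatically packaged into the prompt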
        responses = prompter.prompt_with_source(research["prompt"], prompt_name="just_the_facts", temperature=0.3)
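
        # post-inference fact checks of each response against the source material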
        ev_numbers = prompter.evidence_check_numbers(responses)
        ev_sources = prompter.evidence_check_sources(responses)
        ev_stats = prompter.evidence_comparison_stats(responses)
        not_found = prompter.classify_not_found_response(responses, parse_response=True, evidence_match=True,
                                                         ask_the_model=False)

        for r, response in enumerate(responses):

            print("LLM Response: ", response["llm_response"])
            print("Numbers: ", ev_numbers[r]["fact_check"])
            print("Sources: ", ev_sources[r]["source_review"])
            print("Stats: ", ev_stats[r]["comparison_stats"])
            print("Not Found Check: ", not_found[r])
        prompter.clear_source_materials()
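
    # save the prompt interaction state to the llmware prompt history folder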
    prompter.save_state()

    print("\nupdate: prompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))
    print("update: time processing: ", time.time() - t0)

    return 0


if __name__ == "__main__":

    model_name = "llmware/dragon-deci-6b-v0"

    contract_analysis_w_fact_checking(model_name)