Upload 2 files
generation_test_hf_script.py
ADDED
@@ -0,0 +1,90 @@
+
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+
+def load_rag_benchmark_tester_ds():
+
+    # pull the 200-question RAG benchmark test dataset from the LLMWare Hugging Face repo
+    from datasets import load_dataset
+
+    ds_name = "llmware/rag_instruct_benchmark_tester"
+
+    dataset = load_dataset(ds_name)
+
+    print("update: loading RAG Benchmark test dataset - ", dataset)
+
+    test_set = []
+    for i, samples in enumerate(dataset["train"]):
+        test_set.append(samples)
+
+        # to view test set samples
+        # print("rag benchmark dataset test samples: ", i, samples)
+
+    return test_set
+
+
+def run_test(model_name, test_ds):
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    print("\nRAG Performance Test - 200 questions")
+    print("update: model - ", model_name)
+    print("update: device - ", device)
+
+    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+    model.to(device)
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+    for i, entries in enumerate(test_ds):
+
+        # prepare the prompt packaging used in the fine-tuning process
+        # note: in our testing, the Yi model performed better with a trailing "\n" at the end of the prompt
+        new_prompt = "<human>: " + entries["context"] + "\n" + entries["query"] + "\n" + "<bot>:" + "\n"
+
+        inputs = tokenizer(new_prompt, return_tensors="pt")
+        start_of_output = len(inputs.input_ids[0])  # prompt length, used below to slice out only the new tokens
+
+        # temperature: set at 0.3 for consistency of output
+        # max_new_tokens: set at 100 - may prematurely stop a few of the summaries
+
+        outputs = model.generate(
+            inputs.input_ids.to(device),
+            eos_token_id=tokenizer.eos_token_id,
+            pad_token_id=tokenizer.eos_token_id,
+            do_sample=True,
+            temperature=0.3,
+            max_new_tokens=100,
+        )
+
+        output_only = tokenizer.decode(outputs[0][start_of_output:], skip_special_tokens=True)
+
+        # quick/optional post-processing clean-up of potential fine-tuning artifacts
+
+        eot = output_only.find("<|endoftext|>")
+        if eot > -1:
+            output_only = output_only[:eot]
+
+        bot = output_only.find("<bot>:")
+        if bot > -1:
+            output_only = output_only[bot + len("<bot>:"):]
+
+        # end - post-processing
+
+        print("\n")
+        print(i, "llm_response - ", output_only)
+        print(i, "gold_answer - ", entries["answer"])
+
+    return 0
+
+
+if __name__ == "__main__":
+
+    test_ds = load_rag_benchmark_tester_ds()
+
+    model_name = "llmware/dragon-deci-7b-v0"
+    output = run_test(model_name, test_ds)
+
+
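
As a quick sanity check before committing to the full 200-question pass, the two functions above can be imported and run on a small slice of the test set. A minimal sketch, assuming the file above is saved as generation_test_hf_script.py on the Python path:

    from generation_test_hf_script import load_rag_benchmark_tester_ds, run_test

    # smoke test: run only the first 5 benchmark questions to verify that the
    # model downloads and the generate loop works before the full 200-question run
    test_ds = load_rag_benchmark_tester_ds()
    run_test("llmware/dragon-deci-7b-v0", test_ds[:5])
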
generation_test_llmware_script.py
ADDED
@@ -0,0 +1,70 @@
+
+from llmware.prompts import Prompt
+
+
+def load_rag_benchmark_tester_ds():
+
+    # pull the 200-question RAG benchmark test dataset from the LLMWare Hugging Face repo
+    from datasets import load_dataset
+
+    ds_name = "llmware/rag_instruct_benchmark_tester"
+
+    dataset = load_dataset(ds_name)
+
+    print("update: loading RAG Benchmark test dataset - ", dataset)
+
+    test_set = []
+    for i, samples in enumerate(dataset["train"]):
+        test_set.append(samples)
+
+        # to view test set samples
+        # print("rag benchmark dataset test samples: ", i, samples)
+
+    return test_set
+
+
+def run_test(model_name, prompt_list):
+
+    print("\nupdate: Starting RAG Benchmark Inference Test - ", model_name)
+
+    # pull a DRAGON / BLING model directly from the llmware catalog, e.g., no from_hf=True needed
+    prompter = Prompt().load_model(model_name)
+
+    for i, entries in enumerate(prompt_list):
+
+        prompt = entries["query"]
+        context = entries["context"]
+
+        response = prompter.prompt_main(prompt, context=context, prompt_name="default_with_context", temperature=0.3)
+
+        print("\nupdate: model inference output - ", i, response["llm_response"])
+        print("update: gold_answer - ", i, entries["answer"])
+
+        fc = prompter.evidence_check_numbers(response)
+        sc = prompter.evidence_comparison_stats(response)
+        sr = prompter.evidence_check_sources(response)
+
+        print("\nFact-Checking Tools")
+
+        for fc_entry in fc:
+            for f, facts in enumerate(fc_entry["fact_check"]):
+                print("update: fact check - ", f, facts)
+
+        for sc_entry in sc:
+            print("update: comparison stats - ", sc_entry["comparison_stats"])
+
+        for sr_entry in sr:
+            for s, sources in enumerate(sr_entry["source_review"]):
+                print("update: sources - ", s, sources)
+
+    return 0
+
+
+if __name__ == "__main__":
+
+    core_test_set = load_rag_benchmark_tester_ds()
+
+    # one of the 7 GPU DRAGON models
+    gpu_model_name = "llmware/dragon-deci-7b-v0"
+
+    output = run_test(gpu_model_name, core_test_set)
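
The same Prompt calls used in the script above can also be exercised on a single ad-hoc question, which is handy for spot-checking the fact-checking output. A minimal sketch, using an invented invoice snippet as the context (not part of the benchmark dataset):

    from llmware.prompts import Prompt

    prompter = Prompt().load_model("llmware/dragon-deci-7b-v0")

    # ask one question against a small invented context passage
    response = prompter.prompt_main(
        "What is the total amount of the invoice?",
        context="Invoice #20221584. Total amount due: $1,000, payable within 30 days.",
        prompt_name="default_with_context",
        temperature=0.3,
    )

    print("llm_response - ", response["llm_response"])

    # evidence_check_numbers flags numbers in the response that cannot be
    # matched back to the context passage
    for fc_entry in prompter.evidence_check_numbers(response):
        for f, facts in enumerate(fc_entry["fact_check"]):
            print("fact check - ", f, facts)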