|
import torch |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
from transformers.generation.utils import GenerationConfig |
|
from peft import PeftModel, PeftConfig |
|
import json |
|
import csv |
|
|
|
|
|
# Local checkpoint directory of the ChatGLM3-6B model.
model_path = "/root/lanyun-tmp/ZhipuAI/chatglm3-6b/"



# Load the causal LM in fp16 and let accelerate's device_map place it across
# available devices; trust_remote_code is required because ChatGLM3 ships its
# own modeling code inside the checkpoint.
model = AutoModelForCausalLM.from_pretrained(

    model_path, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True

)

# Use the generation defaults (sampling params etc.) shipped with the checkpoint.
model.generation_config = GenerationConfig.from_pretrained(model_path)

# use_fast=False: load the checkpoint's slow (Python) tokenizer implementation;
# trust_remote_code needed for ChatGLM3's custom tokenizer class.
tokenizer = AutoTokenizer.from_pretrained(

    model_path, use_fast=False, trust_remote_code=True,

)
|
|
|
|
|
|
|
|
|
# Evaluation split in JSONL format: one JSON object per line with keys
# 'context', 'question', 'answer0'..'answer3' (consumed by the loop below).
filename = '/root/lanyun-tmp/Dataset/test.jsonl'

# Parse every non-empty line into a dict.  Encoding is pinned to UTF-8 so the
# read does not depend on the platform locale, and blank lines (e.g. a trailing
# newline at EOF) are skipped so they do not crash json.loads.
data = []
with open(filename, 'r', encoding='utf-8') as f:
    for line in f:
        line = line.strip()
        if line:
            data.append(json.loads(line))
|
|
|
|
|
|
|
# Output CSV: one row per test item, containing the predicted option label.
files = 'chatglm3-CoT_answers.csv'

# newline='' is required by the csv module; pin encoding to UTF-8.
with open(files, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)

    for item in data:
        context = item['context']
        question = item['question']
        answer0 = item['answer0']
        answer1 = item['answer1']
        answer2 = item['answer2']
        answer3 = item['answer3']

        # Build the CoT prompt.  BUGFIX: the original wrapped each value in a
        # set literal ({context}, {question}, ...), which injected set braces
        # and repr quoting into the prompt text; pass the values directly.
        messages = str([

            {"role": "system", "content": """As a reading comprehension expert, you will receive context, question and four options. Please understand the context given below first, and then output the label of the correct option as the answer to the question based on the context.You Should Follow thinking steps below:

1.Read the question and understand the requirements.

2.Eliminate obviously incorrect options.

3.Skim through the passage to find information that supports the remaining options.

4.Choose the best answer based on the information in the passage."""},

            {"role": "user", "content": str({'context': context, 'question': question, "answer0": answer0, "answer1": answer1, "answer2": answer2, "answer3": answer3})}

        ])

        response = model.chat(tokenizer, messages)

        # ChatGLM3's chat() returns (reply_text, history), so response[0][0]
        # is the first character of the reply — expected to be the option
        # label "0".."3".  NOTE(review): assumes the model leads with the
        # label; responses starting with other text are silently truncated.
        answer = response[0][0]

        print(answer)

        # BUGFIX: writerow expects a row (an iterable of fields).  Passing a
        # bare string makes csv treat each character as a separate column, so
        # wrap the answer in a one-element list to emit a single-field row.
        writer.writerow([answer])