# Hugging Face file-viewer page residue (commented out so the script parses):
# KashiwaByte's picture
# initial commit
# 7c93e9d
# raw
# history blame
# 1.91 kB
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
from peft import PeftModel, PeftConfig
import json
import csv
# Paths to the fine-tuned LoRA adapter checkpoint and the base MiniCPM model.
lora_path = "/root/lanyun-tmp/output/MiniCPM/checkpoint-9000/"
model_path = '/root/lanyun-tmp/OpenBMB/MiniCPM-2B-sft-fp32'

# Load the slow tokenizer first, then the fp16 base model, letting
# `device_map="auto"` spread the weights across available devices.
tokenizer = AutoTokenizer.from_pretrained(
    model_path,
    use_fast=False,
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(model_path)

# Attach the LoRA weights on top of the frozen base model.
model = PeftModel.from_pretrained(model, lora_path)
# Load the multiple-choice QA test set: one JSON object per line (JSONL).
filename = '/root/lanyun-tmp/Dataset/test.jsonl'
# Explicit UTF-8 so the Chinese text decodes regardless of the system locale;
# skip blank lines so a trailing newline does not crash json.loads.
with open(filename, 'r', encoding='utf-8') as f:
    data = [json.loads(line) for line in f if line.strip()]
# Run the model over every test item and write one answer label per CSV row.
files = 'MiniCPM2B-ZH-_answers.csv'
with open(files, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    for item in data:
        # Question payload passed to the model. The original wrapped each
        # value in an accidental set literal ({context}), which serialized
        # as {'context': {'...'}} — pass the raw field values instead.
        payload = {
            'context': item['context'],
            'question': item['question'],
            "answer0": item['answer0'],
            "answer1": item['answer1'],
            "answer2": item['answer2'],
            "answer3": item['answer3'],
        }
        # MiniCPM's chat() takes a single query string, so the message list
        # is stringified as the prompt (kept from the original design).
        messages = str([
            {"role": "system", "content": "作为阅读理解专家,你​​将收到上下文,问题和四个选项,请先理解下面给出的上下文,然后根据上下文输出正确选项的标签作为问题的答案}"},
            {"role": "user", "content": str(payload)},
        ])
        response = model.chat(tokenizer, messages)
        # First character of the first returned string — presumably the
        # option label (0-3); NOTE(review): confirm response structure.
        answer = response[0][0]
        print(answer)
        # writerow expects a sequence; passing the bare string would split
        # the answer into one column per character.
        writer.writerow([answer])