# LoRA gpt-j-6b

## How-to
```python
import torch
from peft import PeftModel
from transformers import GenerationConfig, AutoTokenizer, GPTJForCausalLM

base_model = 'EleutherAI/gpt-j-6b'
temperature = 0.7
top_p = 0.75
top_k = 40
num_beams = 4
max_new_tokens = 256
device = 'cuda'

# Load the tokenizer for the base model; it is needed below
# for padding and for encoding the prompt.
tokenizer = AutoTokenizer.from_pretrained(base_model)

# Alpaca-LoRA prompt template.
template = {
    "description": "Template used by Alpaca-LoRA.",
    "prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
    "prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
    "response_split": "### Response:"
}
```
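The example further down only uses `prompt_no_input`. For tasks that come with extra context, the `prompt_input` variant fills both fields. A minimal sketch (the instruction and input strings here are illustrative, not from the original recipe):

```python
# Illustrative only: formatting a prompt that also carries an input field.
prompt_with_input = template["prompt_input"].format(
    instruction="Summarize the following text.",
    input="NumPy is a library for numerical computing in Python.",
)
print(prompt_with_input)
```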
Load the base model in fp16 and attach the LoRA adapter:

```python
model = GPTJForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
)
model = PeftModel.from_pretrained(
    model,
    'mesolitica/gptj6b-finetune',
    torch_dtype=torch.float16,
)

# GPT-J has no pad token by default; use token id 0 for padding.
model.config.pad_token_id = tokenizer.pad_token_id = 0
model.half()  # already fp16 from torch_dtype; kept as a no-op safeguard
_ = model.eval()
```
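Optionally, the adapter weights can be folded into the base model so inference no longer routes through the PEFT wrapper. This is a sketch using PEFT's `merge_and_unload`, not a step from the original recipe:

```python
# Optional (not in the original recipe): merge the LoRA weights into the
# base model so generation runs without the adapter layers.
merged_model = model.merge_and_unload()
```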
q = """
camne nak pakai numpy
"""
prompt = template["prompt_no_input"].format(instruction=q)
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].to(device)
generation_config = GenerationConfig(
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
)
with torch.no_grad():
generation_output = model.generate(
input_ids=input_ids,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
)
s = generation_output.sequences[0]
output = tokenizer.decode(s)
output,
Example output:

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
camne nak pakai numpy

### Response:
Untuk menggunakan Numpy dalam Python, anda boleh menggunakan kod berikut:

import numpy as np
x = np.array([1, 2, 3, 4, 5])
print(x)

Ini akan mengembalikan array `[1, 2, 3, 4, 5]`.<|endoftext|>
```

The Malay response translates to: "To use Numpy in Python, you can use the following code: `import numpy as np; x = np.array([1, 2, 3, 4, 5]); print(x)`. This will return the array `[1, 2, 3, 4, 5]`."
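The decoded string echoes the full prompt. A small sketch (not part of the original snippet) that uses the template's `response_split` marker to keep only the model's answer and drop the end-of-text token:

```python
# Illustrative: keep only the text after "### Response:" and strip the EOS marker.
response = output.split(template["response_split"])[-1]
response = response.replace("<|endoftext|>", "").strip()
print(response)
```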