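# Gradio demo: instruction-following text generation with the
# VietAI/gpt-j-6B-vietnamese-news base model and the
# hoaiht/vietnamese-alpaca-lora-gpt-j LoRA adapter (Alpaca-LoRA style).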
import sys
import torch
from peft import PeftModel
import transformers
import gradio as gr
# assert (
# "LlamaTokenizer" in transformers._import_structure["models.llama"]
# ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
LOAD_8BIT = True
BASE_MODEL = "VietAI/gpt-j-6B-vietnamese-news"
# LORA_WEIGHTS = "tloen/alpaca-lora-7b"
# LORA_WEIGHTS = "hoaiht/alpaca-lora-7b-vi"
LORA_WEIGHTS = "hoaiht/vietnamese-alpaca-lora-gpt-j"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:  # torch.backends.mps does not exist on older PyTorch builds
    pass
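# Load the base model and attach the LoRA adapter. On CUDA the base model can be
# loaded in 8-bit with fp16 weights; MPS uses fp16, and CPU falls back to a plain
# full-precision load.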
if device == "cuda":
model = AutoModelForCausalLM.from_pretrained(
BASE_MODEL,
load_in_8bit=LOAD_8BIT,
torch_dtype=torch.float16,
device_map="auto",
)
model = PeftModel.from_pretrained(
model,
LORA_WEIGHTS,
torch_dtype=torch.float16,
)
elif device == "mps":
model = transformers.AutoModelForCausalLM.from_pretrained(
BASE_MODEL,
device_map={"": device},
torch_dtype=torch.float16,
)
model = PeftModel.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
torch_dtype=torch.float16,
)
else:
model = transformers.AutoModelForCausalLM.from_pretrained(
BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
)
model = PeftModel.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
)
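# Build the Alpaca-style prompt (with a Vietnamese instruction header). The
# "### Response:" marker is what evaluate() later splits on to isolate the answer.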
def generate_prompt(instruction, input=None):
    if input:
        # Vietnamese for: "Below is an instruction that describes a task, paired
        # with an input that provides further context. Write a response that
        # appropriately completes the request."
        return f"""Dưới đây là một hướng dẫn mô tả một tác vụ, kèm theo một đầu vào cung cấp thêm ngữ cảnh. Viết một phản hồi hoàn thành yêu cầu một cách thích hợp.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        # Vietnamese for: "Below is an instruction that describes a task. Write a
        # response that appropriately completes the request."
        return f"""Dưới đây là một hướng dẫn mô tả một tác vụ. Viết một phản hồi hoàn thành yêu cầu một cách thích hợp.

### Instruction:
{instruction}

### Response:"""
if not LOAD_8BIT:
    model.half()  # seems to fix bugs for some users

model.eval()
if torch.__version__ >= "2" and sys.platform != "win32":
    model = torch.compile(model)
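# Format the prompt, generate with the parameters chosen in the UI, and return
# only the text after the "### Response:" marker.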
def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        # Use max_new_tokens (not max_length) so the prompt length does not eat
        # into the generation budget.
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    return output.split("### Response:")[1].strip()
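# Expose evaluate() through a Gradio UI. The component order below must match
# evaluate()'s positional parameters.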
gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(
            lines=2,
            label="Instruction",
            value="3 điều cần làm để duy trì sức khỏe.",  # "3 things to do to stay healthy."
        ),
        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="🦙🌲 Instruct-tune `VietAI/gpt-j-6B-vietnamese-news` on Alpaca dataset (Vietnamese version) using Alpaca-LoRA",
    # description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
).launch(share=True)
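# share=True publishes a temporary public Gradio link; drop it to serve locally only.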
# Old testing code follows.
"""
if __name__ == "__main__":
    # testing code for readme
    for instruction in [
        "Tell me about alpacas.",
        "Tell me about the president of Mexico in 2019.",
        "Tell me about the king of France in 2019.",
        "List all Canadian provinces in alphabetical order.",
        "Write a Python program that prints the first 10 Fibonacci numbers.",
        "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
        "Tell me five words that rhyme with 'shock'.",
        "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
        "Count up from 1 to 500.",
    ]:
        print("Instruction:", instruction)
        print("Response:", evaluate(instruction))
        print()
"""