import datetime
import json
import os
import shutil
from typing import Optional, Tuple, Union

import gradio as gr
import requests
import torch
import transformers
from fastchat.conversation import (
    Conversation,
    SeparatorStyle,
    get_conv_template,
    register_conv_template,
)
from fastchat.model.model_adapter import BaseAdapter, load_model, model_adapters
from fastchat.serve.cli import SimpleChatIO
from fastchat.serve.inference import generate_stream
from huggingface_hub import Repository, snapshot_download
from peft import LoraConfig, PeftModel, get_peft_model, set_peft_model_state_dict
from transformers import PreTrainedModel, PreTrainedTokenizerBase

# decapoda-research/llama-13b-hf ships a tokenizer config whose class name
# AutoTokenizer cannot resolve, so point the Auto classes directly at the
# LLaMA implementations.
transformers.AutoTokenizer = transformers.LlamaTokenizer
transformers.AutoModelForCausalLM = transformers.LlamaForCausalLM
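

# The BaseAdapter / model_adapters imports above suggest this file once also
# replaced FastChat's catch-all fallback adapter. A minimal sketch of such an
# adapter, assuming the class name and the retry-without-use_fast behavior
# (neither is confirmed by this file as recovered):
class FallbackTokenizerAdapter(BaseAdapter):
    """Fallback adapter that retries tokenizer loading when use_fast fails."""

    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        try:
            tokenizer = transformers.AutoTokenizer.from_pretrained(
                model_path, use_fast=False
            )
        except ValueError:
            tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
        model = transformers.AutoModelForCausalLM.from_pretrained(
            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
        )
        return model, tokenizer


# FastChat keeps its generic BaseAdapter as the last registered entry, so
# replacing that slot swaps the fallback behavior.
model_adapters[-1] = FallbackTokenizerAdapter()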


def load_lora_model(
    model_path: str,
    lora_weight: str,
    device: str,
    num_gpus: int,
    max_gpu_memory: Optional[str] = None,
    load_8bit: bool = False,
    cpu_offloading: bool = False,
    debug: bool = False,
) -> Tuple[Union[PreTrainedModel, PeftModel], PreTrainedTokenizerBase]:
    """Load the base model through FastChat, then apply LoRA weights on top."""
    model: Union[PreTrainedModel, PeftModel]
    tokenizer: PreTrainedTokenizerBase
    model, tokenizer = load_model(
        model_path=model_path,
        device=device,
        num_gpus=num_gpus,
        max_gpu_memory=max_gpu_memory,
        load_8bit=load_8bit,
        cpu_offloading=cpu_offloading,
        debug=debug,
    )
    if lora_weight is not None:
        # Wrap the base model with the LoRA configuration, then load the
        # adapter weights; a full "pytorch_model.bin" takes precedence over
        # an adapter-only "adapter_model.bin".
        config = LoraConfig.from_pretrained(lora_weight)
        model = get_peft_model(model, config)

        checkpoint_name = os.path.join(lora_weight, "pytorch_model.bin")
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(lora_weight, "adapter_model.bin")

        if os.path.exists(checkpoint_name):
            adapters_weights = torch.load(checkpoint_name)
            set_peft_model_state_dict(model, adapters_weights)
        else:
            raise IOError(f"Checkpoint {checkpoint_name} not found")

    if debug:
        print(model)

    return model, tokenizer


print(datetime.datetime.now())

NUM_THREADS = 1

print(NUM_THREADS)
print("starting server ...")

BASE_MODEL = "decapoda-research/llama-13b-hf"
LORA_WEIGHTS_HF = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
HF_TOKEN = os.environ.get("HF_TOKEN", None)
DATASET_REPOSITORY = os.environ.get("DATASET_REPOSITORY", None)
SLACK_WEBHOOK = os.environ.get("SLACK_WEBHOOK", None)

LORA_WEIGHTS = snapshot_download(LORA_WEIGHTS_HF)

repo = None
LOCAL_DIR = "/home/user/data/"

# If credentials are provided, clone the dataset repository used to log
# prompts and completions, starting from a clean checkout.
if HF_TOKEN and DATASET_REPOSITORY:
    try:
        shutil.rmtree(LOCAL_DIR)
    except Exception:
        pass

    repo = Repository(
        local_dir=LOCAL_DIR,
        clone_from=DATASET_REPOSITORY,
        use_auth_token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

model, tokenizer = load_lora_model(
    model_path=BASE_MODEL,
    lora_weight=LORA_WEIGHTS,
    device=device,
    num_gpus=1,
    max_gpu_memory="16GiB",
    load_8bit=True,
    cpu_offloading=False,
    debug=False,
)


# Save the original Conversation methods, then install stateless overrides:
# each request keeps only the latest exchange, so no chat history carries
# over between turns.
Conversation._get_prompt = Conversation.get_prompt
Conversation._append_message = Conversation.append_message


def conversation_append_message(cls, role: str, message: str):
    cls.offset = -2
    return cls._append_message(role, message)


def conversation_get_prompt_overrider(cls: Conversation) -> str:
    cls.messages = cls.messages[-2:]
    return cls._get_prompt()


# Install the overrides; the saved _get_prompt/_append_message references
# above imply these assignments, which were missing here.
Conversation.append_message = conversation_append_message
Conversation.get_prompt = conversation_get_prompt_overrider
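

# get_conv_template() below requires a registered template name, but the
# original registration block is not present in this file as recovered. The
# following is a reconstructed sketch: the "japanese" name, system prompt,
# roles, and separators are assumptions, not confirmed by the source.
register_conv_template(
    Conversation(
        name="japanese",
        system="以下は、タスクを説明する指示です。要求を適切に満たす応答を書きなさい。",
        roles=("### 指示", "### 応答"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
    )
)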


def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs):
    """Append one JSONL record to the hourly log file and push it to the Hub."""
    current_hour = now.strftime("%Y-%m-%d_%H")
    file_name = f"prompts_{LORA_WEIGHTS.split('/')[-1]}_{current_hour}.jsonl"

    if repo is not None:
        repo.git_pull(rebase=True)
        with open(os.path.join(LOCAL_DIR, file_name), "a", encoding="utf-8") as f:
            json.dump(
                {
                    "inputs": inputs,
                    "outputs": outputs,
                    "generate_kwargs": generate_kwargs,
                },
                f,
                ensure_ascii=False,
            )
            f.write("\n")
        repo.push_to_hub()
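

# Each logged record has this shape (illustrative values, not real data):
# {"inputs": "...", "outputs": "...",
#  "generate_kwargs": {"temperature": 0.7, "max_tokens": 100, "repetition_penalty": 1.2}}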


def evaluate(
    instruction,
    temperature=0.7,
    max_tokens=256,
    repetition_penalty=1.0,
):
    try:
        inputs = tokenizer(instruction, return_tensors="pt")
        # Reject inputs that leave no budget for the prompt template and the
        # generated output (roughly 40 tokens are reserved).
        if len(inputs["input_ids"][0]) > max_tokens - 40:
            if HF_TOKEN and DATASET_REPOSITORY:
                try:
                    now = datetime.datetime.now()
                    current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                    print(f"[{current_time}] Pushing prompt and completion to the Hub")
                    save_inputs_and_outputs(
                        now,
                        instruction,
                        "",
                        {
                            "temperature": temperature,
                            "max_tokens": max_tokens,
                            "repetition_penalty": repetition_penalty,
                        },
                    )
                except Exception as e:
                    print(e)
            return (
                f"Please reduce the input length. Currently, {len(inputs['input_ids'][0])} (> {max_tokens - 40}) tokens are used.",
                gr.update(interactive=True),
                gr.update(interactive=True),
            )

        # "japanese" refers to the template registered above (a reconstructed
        # name, see the registration sketch).
        conv = get_conv_template("japanese")

        conv.append_message(conv.roles[0], instruction)
        conv.append_message(conv.roles[1], None)

        generate_stream_func = generate_stream
        prompt = conv.get_prompt()

        gen_params = {
            "model": BASE_MODEL,
            "prompt": prompt,
            "temperature": temperature,
            # Leave ~30 tokens of headroom for the prompt template.
            "max_new_tokens": max_tokens - len(inputs["input_ids"][0]) - 30,
            "stop": conv.stop_str,
            "stop_token_ids": conv.stop_token_ids,
            "echo": False,
            "repetition_penalty": repetition_penalty,
        }
        chatio = SimpleChatIO()
        chatio.prompt_for_output(conv.roles[1])
        output_stream = generate_stream_func(model, tokenizer, gen_params, device)
        output = chatio.stream_output(output_stream)

        if HF_TOKEN and DATASET_REPOSITORY:
            try:
                now = datetime.datetime.now()
                current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                print(f"[{current_time}] Pushing prompt and completion to the Hub")
                save_inputs_and_outputs(
                    now,
                    prompt,
                    output,
                    {
                        "temperature": temperature,
                        "max_tokens": max_tokens,
                        "repetition_penalty": repetition_penalty,
                    },
                )
            except Exception as e:
                print(e)
        return output, gr.update(interactive=True), gr.update(interactive=True)
    except Exception as e:
        print(e)
        import traceback

        if SLACK_WEBHOOK:
            payload_dic = {
                "text": f"BASE_MODEL: {BASE_MODEL}\nLORA_WEIGHTS: {LORA_WEIGHTS}\n"
                + f"instruction: {instruction}\ntemperature: {temperature}\n"
                + f"max_tokens: {max_tokens}\nrepetition_penalty: {repetition_penalty}\n\n"
                + str(traceback.format_exc()),
                "username": "Hugging Face Space",
                "channel": "#monitor",
            }

            try:
                requests.post(SLACK_WEBHOOK, data=json.dumps(payload_dic))
            except Exception:
                pass
        return (
            "An error occurred. Please try again later.",
            gr.update(interactive=True),
            gr.update(interactive=True),
        )


def reset_textbox():
    return gr.update(value=""), gr.update(value=""), gr.update(value="")


def no_interactive() -> Tuple[dict, dict]:
    return gr.update(interactive=False), gr.update(interactive=False)


title = """<h1 align="center">LLaMA-13B Japanese LoRA</h1>"""

theme = gr.themes.Default(primary_hue="green")
description = (
    "The official demo for **[izumi-lab/llama-13b-japanese-lora-v0-1ep](https://huggingface.co/izumi-lab/llama-13b-japanese-lora-v0-1ep)**, "
    "a 13B-parameter LLaMA model fine-tuned to follow instructions, "
    "trained on the [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset) dataset. "
    "For more information, please visit [the project's website](https://llm.msuzuki.me). "
    "The model can generate up to 256 tokens, but this demo caps the total length at 200 tokens due to the GPU memory limit of Hugging Face Spaces. "
    "Generating an output takes about **1 minute**, and responses may be slower under heavy traffic."
)
with gr.Blocks(
    css="""#col_container { margin-left: auto; margin-right: auto;}""",
    theme=theme,
) as demo:
    gr.HTML(title)
    gr.Markdown(description)
    with gr.Column(elem_id="col_container", visible=False) as main_block:
        with gr.Row():
            with gr.Column():
                instruction = gr.Textbox(
                    lines=2, label="Instruction", placeholder="こんにちは"
                )
                inputs = gr.Textbox(lines=1, label="Input", placeholder="none")
                with gr.Row():
                    with gr.Column(scale=3):
                        clear_button = gr.Button("Clear").style(full_width=True)
                    with gr.Column(scale=5):
                        submit_button = gr.Button("Submit").style(full_width=True)
                outputs = gr.Textbox(lines=4, label="Output")

        with gr.Accordion("Parameters", open=True):
            temperature = gr.Slider(
                minimum=0,
                maximum=1.0,
                value=0.7,
                step=0.05,
                interactive=False,
                label="Temperature",
            )
            max_tokens = gr.Slider(
                minimum=20,
                maximum=200,
                value=100,
                step=1,
                interactive=True,
                label="Max length (Pre-prompt + instruction + input + output)",
            )
            repetition_penalty = gr.Slider(
                minimum=1.0,
                maximum=5.0,
                value=1.2,
                step=0.1,
                interactive=True,
                label="Repetition penalty",
            )

    with gr.Column(elem_id="user_consent_container") as user_consent_block:
        gr.Markdown(
            """
## User Consent for Data Collection, Use, and Sharing:
By using our app, you acknowledge and agree to the following terms regarding the data you provide:

- **Collection**: We may collect inputs you type into our app.
- **Use**: We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications.
- **Sharing and Publication**: Your input data may be published, shared with third parties, or used for analysis and reporting purposes.
- **Data Retention**: We may retain your input data for as long as necessary.

By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. If you do not agree with our data collection, use, and sharing practices, please do not use our app.

Please note that this space utilizes [decapoda-research/llama-13b-hf](https://huggingface.co/decapoda-research/llama-13b-hf), and its special license applies.

## データ収集、利用、共有に関するユーザーの同意：
本アプリを使用することにより、提供するデータに関する以下の条件に同意するものとします：

- **収集**: 本アプリに入力されたテキストデータは収集される場合があります。
- **利用**: 収集されたデータは研究や、商用アプリケーションを含むサービスの開発に使用される場合があります。
- **共有および公開**: 入力データは第三者と共有されたり、分析や公開の目的で使用される場合があります。
- **データ保持**: 入力データは必要な限り保持されます。

本アプリを引き続き使用することにより、上記のようにデータの収集・利用・共有について同意します。データの利用方法に同意しない場合は、本アプリを使用しないでください。

なお、このスペースは [decapoda-research/llama-13b-hf](https://huggingface.co/decapoda-research/llama-13b-hf) を利用しており、そのライセンスが適用されます。
            """
        )
        accept_button = gr.Button("I Agree")

    def enable_inputs():
        return user_consent_block.update(visible=False), main_block.update(
            visible=True
        )

    accept_button.click(
        fn=enable_inputs,
        inputs=[],
        outputs=[user_consent_block, main_block],
        queue=False,
    )
    submit_button.click(no_interactive, [], [submit_button, clear_button])
    submit_button.click(
        evaluate,
        [instruction, temperature, max_tokens, repetition_penalty],
        [outputs, submit_button, clear_button],
    )
    # reset_textbox returns three updates, so clear all three textboxes.
    clear_button.click(reset_textbox, [], [instruction, inputs, outputs], queue=False)

demo.queue(max_size=20, concurrency_count=NUM_THREADS, api_open=False).launch(
    server_name="0.0.0.0", server_port=7860
)