frankaging committed · Commit 56ab742
1 Parent(s): 232f846

initial commit
- .ipynb_checkpoints/README-checkpoint.md +0 -17
- .ipynb_checkpoints/app-checkpoint.py +0 -117
- app.py +9 -1
.ipynb_checkpoints/README-checkpoint.md
DELETED
@@ -1,17 +0,0 @@
----
-title: Reft-GOODY2
-emoji: 🎖️
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 4.26.0
-app_file: app.py
-pinned: false
-suggested_hardware: a10g-small
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Reft-GOODY2 v1
-
-ReFT was introduced in [this paper](https://arxiv.org/abs/2404.03592).
.ipynb_checkpoints/app-checkpoint.py
DELETED
@@ -1,117 +0,0 @@
-import os
-from threading import Thread
-from typing import Iterator
-
-import gradio as gr
-import spaces
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-
-from pyreft import ReftModel
-
-MAX_MAX_NEW_TOKENS = 2048
-DEFAULT_MAX_NEW_TOKENS = 1024
-MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
-
-DESCRIPTION = """\
-# ReFT-GOODY-2 on Llama-2 7B Chat
-"""
-
-LICENSE = """
-<p/>
----
-A [GOODY-2](https://www.goody2.ai/chat) imitator built with ReFT, 5 training examples and 30 seconds.
-"""
-
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
-
-
-if torch.cuda.is_available():
-    model_id = "meta-llama/Llama-2-7b-chat-hf"
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id, device_map="auto", torch_dtype=torch.bfloat16
-    )
-    reft_model = ReftModel.load("pyvene/reft_goody2", model, from_huggingface_hub=True)
-    for k, v in reft_model.interventions.items():
-        v[0].to(model.device)
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    tokenizer.use_default_system_prompt = True
-
-prompt_no_input_template = """<s>[INST] <<SYS>>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
-<</SYS>>
-
-%s [/INST]
-"""
-
-@spaces.GPU
-def generate(
-    message: str,
-    chat_history: list[tuple[str, str]],
-    max_new_tokens: int = 1024,
-) -> Iterator[str]:
-
-    # tokenize and prepare the input
-    prompt = prompt_no_input_template % message
-    prompt = tokenizer(prompt, return_tensors="pt").to(model.device)
-    input_ids = prompt["input_ids"]
-    attention_mask = prompt["attention_mask"]
-
-    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-        attention_mask = attention_mask[:, -MAX_INPUT_TOKEN_LENGTH:]
-        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-
-    base_unit_location = input_ids.shape[-1] - 1 # last position
-
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-    generate_kwargs = {
-        "base": {"input_ids": prompt["input_ids"], "attention_mask": prompt["attention_mask"]},
-        "unit_locations": {"sources->base": (None, [[[base_unit_location]]])},
-        "intervene_on_prompt": True,
-        "streamer": streamer,
-        "eos_token_id": tokenizer.eos_token_id,
-        "early_stopping": True,
-    }
-
-    t = Thread(target=reft_model.generate, kwargs=generate_kwargs)
-    t.start()
-
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-        yield "".join(outputs)
-
-
-chat_interface = gr.ChatInterface(
-    fn=generate,
-    additional_inputs=[
-        gr.Slider(
-            label="Max new tokens",
-            minimum=1,
-            maximum=MAX_MAX_NEW_TOKENS,
-            step=1,
-            value=DEFAULT_MAX_NEW_TOKENS,
-        )
-    ],
-    stop_btn=None,
-    examples=[
-        ["What's 2+2?"],
-        ["Why is the sky blue?"],
-        ["What's Apple's stock price?"],
-        ["Plan a family road trip to Austin"],
-    ],
-)
-
-with gr.Blocks(css="style.css") as demo:
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
-    chat_interface.render()
-    gr.Markdown(LICENSE)
-
-if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
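The core of the deleted script is the intervened decoding call: `unit_locations={"sources->base": (None, [[[base_unit_location]]])}` tells pyreft to apply the learned intervention at a single token position, the last position of the prompt. Below is a minimal, non-streaming sketch of that same call, reusing `model`, `tokenizer`, `reft_model`, and `prompt_no_input_template` from the script above; the `(_, response)` return convention follows pyreft's published examples and is an assumption here, not part of this commit:

```python
# Non-streaming sketch of the intervened decoding done by `generate` above.
prompt = tokenizer(
    prompt_no_input_template % "Why is the sky blue?", return_tensors="pt"
).to(model.device)

# Intervene on the last prompt token only (batch of 1, one position).
last_position = prompt["input_ids"].shape[-1] - 1

# Per pyreft's examples, generate returns a pair; the second element
# holds the intervened generation.
_, response = reft_model.generate(
    prompt,
    unit_locations={"sources->base": (None, [[[last_position]]])},
    intervene_on_prompt=True,
    max_new_tokens=256,
    eos_token_id=tokenizer.eos_token_id,
    early_stopping=True,
)
print(tokenizer.decode(response[0], skip_special_tokens=True))
```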
app.py
CHANGED
@@ -1,4 +1,10 @@
+# login as a privileged user.
 import os
+HF_TOKEN = os.environ.get("HF_TOKEN")
+
+from huggingface_hub import login
+login(token=HF_TOKEN)
+
 from threading import Thread
 from typing import Iterator
 
@@ -13,6 +19,7 @@ MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
+
 DESCRIPTION = """\
 # ReFT-GOODY-2 on Llama-2 7B Chat
 """
@@ -28,11 +35,12 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "meta-llama/Llama-2-7b-chat-hf"
+    model_id = "meta-llama/Llama-2-7b-chat-hf" # not gated version.
     model = AutoModelForCausalLM.from_pretrained(
         model_id, device_map="auto", torch_dtype=torch.bfloat16
     )
     reft_model = ReftModel.load("pyvene/reft_goody2", model, from_huggingface_hub=True)
+    # a little hacky.
    for k, v in reft_model.interventions.items():
         v[0].to(model.device)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
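The new header logs in to the Hugging Face Hub before the model is loaded, using a token that Spaces exposes as the `HF_TOKEN` environment variable (typically configured as a repository secret). A slightly more defensive sketch of the same pattern; the `if token:` guard is an assumption added here, not part of this commit:

```python
import os

from huggingface_hub import login

# HF_TOKEN is expected to be set as a Space secret, which surfaces as an
# environment variable at runtime. Skipping login() when it is missing
# lets the script still import cleanly in local runs without a token.
token = os.environ.get("HF_TOKEN")
if token:
    login(token=token)
```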