CoderCowMoo committed on
Commit 4f77518 • 1 Parent(s): d592427

import from transformers dumbass

Files changed (1)
  1. app.py +99 -99
app.py CHANGED
@@ -1,100 +1,100 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
- from bitsandbytes import BitsAndBytesConfig
+ from transformers import BitsAndBytesConfig
  import spaces
  import torch
  from safetensors import safe_open
  from jaxtyping import Float, Int
  from typing import List, Callable
  from torch import Tensor
  from threading import Thread
  import einops
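
  # Load the tokenizer and the model with 4-bit quantization (bitsandbytes),
  # which keeps the 70B weights small enough to fit on a single GPU.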
  tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-LLaMA-70B-Instruct")
  quantization_config = BitsAndBytesConfig(load_in_4bit=True)
  model = AutoModelForCausalLM.from_pretrained(
      "NousResearch/Meta-LLaMA-70B-Instruct",
      quantization_config=quantization_config,
      device_map="cuda",
  ).eval()

  @spaces.GPU
  def respond(
      message,
      history: list[tuple[str, str]],
      system_message,
      max_tokens,
      temperature,
      top_p,
  ):
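      # Rebuild the whole conversation for the chat template: system prompt
      # first, then the alternating user/assistant turns Gradio passes as history.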
      messages = [{"role": "system", "content": system_message}]

      for val in history:
          if val[0]:
              messages.append({"role": "user", "content": val[0]})
          if val[1]:
              messages.append({"role": "assistant", "content": val[1]})

      messages.append({"role": "user", "content": message})

      response = ""

      inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
      streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True, skip_prompt=True)
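
      # model.generate blocks until finished, so run it on a background thread
      # and stream partial text back through the TextIteratorStreamer.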
      thread = Thread(
          target=model.generate,
          kwargs={
              "inputs": inputs,
              "max_new_tokens": max_tokens,
              "do_sample": True,  # temperature/top_p are ignored unless sampling is enabled
              "temperature": temperature,
              "top_p": top_p,
              "streamer": streamer,
          },
      )
      thread.start()

      for new_text in streamer:
          # TextIteratorStreamer yields plain decoded strings, so append the
          # chunk directly (there is no .choices[...].delta wrapper here).
          response += new_text
          yield response
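
  # Projects the refusal direction out of a weight matrix. Assuming vec is
  # unit-norm, this computes W - (W @ vec) vec, removing each row's component
  # along vec.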
  def get_orthogonalized_matrix(matrix: Float[Tensor, '... d_model'], vec: Float[Tensor, 'd_model']) -> Float[Tensor, '... d_model']:
      device = matrix.device
      vec = vec.to(device)
      proj = einops.einsum(matrix, vec.view(-1, 1), '... d_model, d_model single -> ... single') * vec
      return matrix - proj

  """
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
  """
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
          gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
          gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
          gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
          gr.Slider(
              minimum=0.1,
              maximum=1.0,
              value=0.95,
              step=0.05,
              label="Top-p (nucleus sampling)",
          ),
      ],
  )

  if __name__ == "__main__":
      # Load the precomputed refusal direction from refusal_dir.safetensors.
      with safe_open("refusal_dir.safetensors", framework="pt", device="cpu") as f:
          refusal_dir = f.get_tensor("refusal_dir")
      refusal_dir = refusal_dir.cpu().float()
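
      # Orthogonalize the embedding matrix and, in every transformer block, the
      # attention output projection and the MLP down-projection against the
      # refusal direction, so no layer can write it into the residual stream.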
      model.model.embed_tokens.weight.data = get_orthogonalized_matrix(model.model.embed_tokens.weight, refusal_dir)

      for block in model.model.layers:
          block.self_attn.o_proj.weight.data = get_orthogonalized_matrix(block.self_attn.o_proj.weight, refusal_dir)
          block.mlp.down_proj.weight.data = get_orthogonalized_matrix(block.mlp.down_proj.weight.T, refusal_dir).T

      demo.launch()
 
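A quick way to sanity-check get_orthogonalized_matrix is to confirm that, for a unit-norm direction, every row of the orthogonalized matrix has near-zero dot product with that direction. A minimal sketch, not part of the commit (the helper is restated here so it runs standalone, and the shapes are made up):

import torch
import einops

def get_orthogonalized_matrix(matrix, vec):
    # projection of each row onto vec (vec assumed unit-norm), then removed
    proj = einops.einsum(matrix, vec.view(-1, 1), '... d_model, d_model single -> ... single') * vec
    return matrix - proj

W = torch.randn(32, 16)
v = torch.randn(16)
v = v / v.norm()  # the projection formula assumes a unit-norm direction

W_orth = get_orthogonalized_matrix(W, v)
print((W_orth @ v).abs().max())  # ~0: no row can write along v anymore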