Spaces:
Runtime error
Runtime error
Upload 2 files
Browse files- app.py +284 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers.utils.dummy_pt_objects import NezhaModel
|
2 |
+
from time import process_time, sleep
|
3 |
+
import re
|
4 |
+
import torch.nn.utils.prune as prune
|
5 |
+
from torch import nn
|
6 |
+
import torch
|
7 |
+
from transformers import AutoTokenizer
|
8 |
+
from transformers import OPTForCausalLM
|
9 |
+
# Galactica 1.3B causal LM (OPT architecture), downloaded from the HF hub at import time.
model_name = "facebook/galactica-1.3b"
model = OPTForCausalLM.from_pretrained(model_name)

tokenizer = AutoTokenizer.from_pretrained(model_name)


# Fraction of weights zeroed per Linear layer by L1 unstructured pruning (see prune_transform).
PRUNE_RATE = 0.2
+
def prune_transform(model: nn.Module, amount=None) -> nn.Module:
    """Apply one-shot L1 unstructured pruning to every Linear layer of *model*.

    Args:
        model: Module to prune (modified in place).
        amount: Fraction of weights to zero per layer; when ``None`` the
            module-level ``PRUNE_RATE`` is used (keeps the old behavior).

    Returns:
        The same module; the pruning re-parametrization is removed so the
        zeroed weights are baked directly into each layer's ``weight``.
    """
    if amount is None:
        amount = PRUNE_RATE
    for _, module in model.named_modules():
        if isinstance(module, nn.Linear):
            prune.l1_unstructured(module, name="weight", amount=amount)
            # Make pruning permanent: drop the weight_orig/weight_mask buffers.
            prune.remove(module, "weight")
    return model
24 |
+
|
25 |
+
|
26 |
+
# Prune the loaded model once at start-up to shrink inference cost.
model = prune_transform(model)
27 |
+
|
28 |
+
|
29 |
+
class Lady():
|
30 |
+
|
31 |
+
name = "ใฌใคใ"
|
32 |
+
hobby = "ใฒใผใ "
|
33 |
+
work = "ใๅฌขๆง"
|
34 |
+
|
35 |
+
def name_text(self):
|
36 |
+
name = self.name
|
37 |
+
name_text = f"๐ฃYou are {name}, and called {name}. {name} uses a lot of internet slang. {name}:\"AMA.\" He:\"Hey! WYD? What your name?\" {name}:\"I'm {name}. Just call me {name}. lol\""
|
38 |
+
return name_text
|
39 |
+
|
40 |
+
def hobby_text(self):
|
41 |
+
name = self.name
|
42 |
+
hobby = self.hobby
|
43 |
+
hobby_text = f"{name} is interested in {hobby}. {name} spends holiday {hobby}. He:\"BTW, what your hobby?\" {name}:\"I love {hobby} cuz my life is {hobby}. Great!!! โD\""
|
44 |
+
return hobby_text
|
45 |
+
|
46 |
+
def work_text(self):
|
47 |
+
name = self.name
|
48 |
+
work = self.work
|
49 |
+
work_text = f"{name} is {work}, work as {work}. He:\"Wow! What your work?\" {name}:\"I'm {work}. {work} is my calling af. :)\""
|
50 |
+
return work_text
|
51 |
+
|
52 |
+
def question_text(self):
|
53 |
+
name = self.name
|
54 |
+
question_text = f"He:\"Let's change the subject.\" {name}:\"Listen to this story.\""
|
55 |
+
return question_text
|
56 |
+
|
57 |
+
|
58 |
+
class King():
|
59 |
+
|
60 |
+
name = "ใฌใคใ"
|
61 |
+
hobby = "ใฒใผใ "
|
62 |
+
work = "ใๅฌขๆง"
|
63 |
+
|
64 |
+
def name_text(self):
|
65 |
+
name = self.name
|
66 |
+
name_text = f"๐ฃYou are {name}, and called {name}. He:\"What your name?\" {name}:\"I'm {name}. Please call me {name}.\""
|
67 |
+
return name_text
|
68 |
+
|
69 |
+
def hobby_text(self):
|
70 |
+
name = self.name
|
71 |
+
hobby = self.hobby
|
72 |
+
hobby_text = f"{name} is interested in {hobby}. {name} spends holiday {hobby}. He:\"What your hobby?\" {name}:\"My hobby is {hobby}. I enjoy doing {hobby}.\""
|
73 |
+
return hobby_text
|
74 |
+
|
75 |
+
def work_text(self):
|
76 |
+
name = self.name
|
77 |
+
work = self.work
|
78 |
+
work_text = f"{name} is {work}, work as {work}. He:\"What your work?\" {name}:\"I {work} I live my life as {work}.\""
|
79 |
+
return work_text
|
80 |
+
|
81 |
+
def question_text(self):
|
82 |
+
name = self.name
|
83 |
+
question_text = f"He:Let's change the subject. {name}:\"Listen to this story.\""
|
84 |
+
return question_text
|
85 |
+
|
86 |
+
|
87 |
+
class Robot():
|
88 |
+
|
89 |
+
name = "ใใญ"
|
90 |
+
hobby = "ๆฆ่ป็ซถๆ"
|
91 |
+
work = "ใญใผใ็ๅธ"
|
92 |
+
|
93 |
+
def name_text(self) -> str:
|
94 |
+
name = self.name
|
95 |
+
name_text = f"๐ฃใใชใใฏ{name}ใงใๅๅใฏ{name}ใจใใใพใใ{name}:ใ็งใฏ{name}ใงใใ{name}ใจๅผใใงใใ ใใใ"
|
96 |
+
return name_text
|
97 |
+
|
98 |
+
def hobby_text(self) -> str:
|
99 |
+
name = self.name
|
100 |
+
hobby = self.hobby
|
101 |
+
hobby_text = f"่ถฃๅณใฏ{hobby}ใงใไผๆฅใฏ{hobby}ใใใฆ้ใใใฆใใพใใ{name}:ใ็งใฎ่ถฃๅณใฏ{hobby}ใงใใ{hobby}ใใใฆใใใจๆฅฝใใใงใใ"
|
102 |
+
return hobby_text
|
103 |
+
|
104 |
+
def work_text(self) -> str:
|
105 |
+
name = self.name
|
106 |
+
work = self.work
|
107 |
+
work_text = f"{name}ใฏ{work}ใงใๆฎๆฎตใฏ{work}ใจใใฆ็ๆดปใใฆใใพใใ{name}:ใ็งใฏ{work}ใ{work}ใจใใฆ็ๆดปใใฆใใพใใ"
|
108 |
+
return work_text
|
109 |
+
|
110 |
+
def question_text(self) -> str:
|
111 |
+
name = self.name
|
112 |
+
question_text = f"ไบบ้:ใ่ฉฑ้กใๅคใใพใใใใ{name}:ใใใใชใใจใใ่ใใฆใใ ใใใ"
|
113 |
+
return question_text
|
114 |
+
|
115 |
+
|
116 |
+
class Friend():
|
117 |
+
|
118 |
+
name = "ใใกใญใน"
|
119 |
+
hobby = "ๆฆ่ป็ซถๆ"
|
120 |
+
work = "ใญใผใ็ๅธ"
|
121 |
+
|
122 |
+
def name_text(self) -> str:
|
123 |
+
name = self.name
|
124 |
+
name_text = f"๐ฃใใชใใฏ{name}ใงใๅๅใฏ{name}ใจใใใพใใ{name}:ใๅใฏ{name}๏ผ{name}ใฃใฆๅผใใงใญใใ"
|
125 |
+
return name_text
|
126 |
+
|
127 |
+
def hobby_text(self) -> str:
|
128 |
+
name = self.name
|
129 |
+
hobby = self.hobby
|
130 |
+
hobby_text = f"่ถฃๅณใฏ{hobby}ใงใไผๆฅใฏ{hobby}ใใใฆ้ใใใฆใใพใใ{name}:ใๅฅฝใใชใใจใฏ{hobby}ใ ใญใใใใใคใชๆใฏ{hobby}ใใใฆใใใ"
|
131 |
+
return hobby_text
|
132 |
+
|
133 |
+
def work_text(self) -> str:
|
134 |
+
name = self.name
|
135 |
+
work = self.work
|
136 |
+
work_text = f"{name}ใฏ{work}ใงใๆฎๆฎตใฏ{work}ใจใใฆ็ๆดปใใฆใใพใใ{name}:ใๅใฏ{work}ใ{work}ใจใใฆๆฎใใใฆใใใ ๏ผใ"
|
137 |
+
return work_text
|
138 |
+
|
139 |
+
def question_text(self) -> str:
|
140 |
+
name = self.name
|
141 |
+
question_text = f"ไบบ้:ใ่ฉฑ้กใๅคใใพใใใใ{name}:ใใใใชใใจใใ่ใใฆใใใ"
|
142 |
+
return question_text
|
143 |
+
|
144 |
+
|
145 |
+
# Text-generation helpers follow; they take the source/history text (and implied
# max/min new-token limits) and return the model's continuation.
|
146 |
+
|
147 |
+
|
148 |
+
def generate(text):
    """Sample a continuation of *text* from the (pruned) Galactica model.

    Args:
        text: Prompt string fed verbatim to the tokenizer.

    Returns:
        The decoded output sequence (prompt plus 10-15 sampled new tokens).
    """
    token_ids = tokenizer.encode(
        text, add_special_tokens=False, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            token_ids.to(model.device),
            max_new_tokens=15,
            min_new_tokens=10,
            do_sample=True,
            top_k=500,
            top_p=0.95,
            pad_token_id=tokenizer.bos_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            # BUGFIX: the original passed `padding="do_not_pad"` (a tokenizer
            # option, not a generate() kwarg) and the misspelled `bad_word_ids`;
            # transformers' generate() rejects unknown kwargs, so both were
            # removed/renamed.  The supported name is `bad_words_ids`.
            bad_words_ids=[[tokenizer.unk_token_id],
                           [2070, 3],
                           [5378]],
        )
    output = tokenizer.decode(output_ids.tolist()[0])
    return output
170 |
+
|
171 |
+
|
172 |
+
def makeMessage_2(text):
    """Run one generation step and split off the model's first quoted reply.

    Args:
        text: Full prompt/history string fed to generate().

    Returns:
        (message, text): the reply with any trailing `."` removed, and the
        updated history ending with `He:"` ready for the next user turn.
    """
    output = generate(text)
    # Normalize full-width ASCII (U+FF01..) in the prompt to half-width so it
    # matches the model output before stripping the echoed prompt.
    fullwidth_to_ascii = {chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}
    text = text.translate(str.maketrans(fullwidth_to_ascii))
    # Drop the echoed prompt from the model output.
    output = output.replace(text, "")
    # Keep everything up to and including the first double quote (whole
    # output when no quote is present).
    quote_at = output.find("\"")
    outputSentence = output if quote_at == -1 else output[:quote_at + 1]
    message = outputSentence.replace(".\"", "")
    # Extend the history with the reply and re-open the human's turn.
    text = "".join([text, outputSentence, "He:\""])
    return message, text
196 |
+
|
197 |
+
|
198 |
+
# Chat entry point follows: wires the persona prompt, history, and user input
# into the text-generation helpers above.
|
199 |
+
|
200 |
+
def chat(character: int,
         name: str,
         hobby: str,
         work: str,
         history: str,
         input: str,
         state):
    """Handle one chat turn for the Gradio interface.

    Args:
        character: Persona selector (1=Lady, 2=Friend, 3=Robot, 4=King;
            anything else falls back to King).
        name: Persona name injected into the prompt.
        hobby: Persona hobby injected into the prompt.
        work: Persona occupation injected into the prompt.
        history: Accumulated prompt so far ("" on the first turn).
        input: The user's new utterance.
        state: Opaque Gradio session state, returned unchanged.

    Returns:
        (message, text, state): the persona's reply, the updated history,
        and the untouched session state.
    """
    # NOTE: the local persona deliberately does NOT shadow the module-level
    # `model` (the language model), which generate() still uses.
    persona_by_id = {
        1: Lady(),
        2: Friend(),
        3: Robot(),
        4: King(),
    }
    start = process_time()
    persona = persona_by_id.get(character, King())

    persona.name, persona.hobby, persona.work = name, hobby, work

    # Assemble the few-shot persona prompt.
    prompt_parts = [
        persona.name_text(),
        persona.hobby_text(),
        persona.work_text(),
        persona.question_text(),
        f"The following is a conversation between a friend and {name}. He:\"",
    ]
    base_text = "".join(prompt_parts)

    if history == "":
        history = f"{base_text}"

    text = history
    text += input + f"\"{name}:\""

    m_start = process_time()
    result = makeMessage_2(text)
    m_end = process_time()
    print(f"็ๆ{m_end-m_start}")

    message = result[0]
    print(message)

    # Retry generation while the reply contains artifacts; give up after
    # 3 attempts.  BUGFIX: `count` was reset to 0 inside the loop (so the
    # cap never fired) and the retry called the undefined name
    # `makeMessage` instead of `makeMessage_2` (NameError at runtime).
    count = 0
    while re.search("ใใ|โโ|s>|^๐ฃ|^ใ|</s>|UNK|@@", message):
        print("error")
        text = history
        input = "ไฝใ่ณชๅใใฆใใ ใใ"
        text += input + f"ใ{name}:ใ"
        result = makeMessage_2(text)
        message = result[0]
        print(message)
        count += 1

        if count > 2:
            message = "่ฉฑ้กใๅคใใพใใใ"
            break
    text = result[1]
    end = process_time()
    print(end-start)

    return message, text, state
269 |
+
|
270 |
+
import gradio as gr
# Free-form textboxes for the user utterance and the running history.
textbox = gr.Textbox()
historybox = gr.Textbox()
# Inputs: character id, name, hobby, work, history, utterance, session state.
# Outputs: reply text, updated history, session state.
iface = gr.Interface(
    chat,
    ["number","text","text","text","text",textbox, "state"],
    ["text", historybox, "state"],
    css=".footer {display:none !important}",
    allow_flagging="never",
    title="Loyal-AI-Chat"
)



iface.launch(inline=True, height=800)
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
sentencepiece
|
2 |
+
transformers==4.21.1
|
3 |
+
torch
|