"""Loyal-AI-Chat: a persona-based Gradio chat app backed by a pruned Galactica model.

Loads ``facebook/galactica-1.3b``, applies 20% L1 unstructured pruning to every
Linear layer, then serves a chat UI where one of four personas
(Lady / Friend / Robot / King) answers in character.
"""
import re
from time import process_time

import gradio as gr
import torch
import torch.nn.utils.prune as prune
from torch import nn
from transformers import AutoTokenizer, OPTForCausalLM

model_name = "facebook/galactica-1.3b"
model = OPTForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Fraction of weights zeroed in every Linear layer.
PRUNE_RATE = 0.2


def prune_transform(model: nn.Module) -> nn.Module:
    """Prune every Linear layer in ``model`` in place and return it.

    Applies L1 unstructured pruning at ``PRUNE_RATE``, then calls
    ``prune.remove`` so the pruning becomes permanent (the mask is baked
    into ``weight`` and the reparametrization hook is dropped).
    """
    for _name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            prune.l1_unstructured(module, name='weight', amount=PRUNE_RATE)
            prune.remove(module, "weight")
    return model


model = prune_transform(model)


class Lady():
    """English-speaking persona that uses heavy internet slang."""

    name = "レイテ"
    hobby = "ゲーム"
    work = "お嬢様"

    def name_text(self):
        # Few-shot prompt fragment introducing the persona's name.
        name = self.name
        name_text = f"👣You are {name}, and called {name}. {name} uses a lot of internet slang. {name}:\"AMA.\" He:\"Hey! WYD? What your name?\" {name}:\"I'm {name}. Just call me {name}. lol\""
        return name_text

    def hobby_text(self):
        name = self.name
        hobby = self.hobby
        hobby_text = f"{name} is interested in {hobby}. {name} spends holiday {hobby}. He:\"BTW, what your hobby?\" {name}:\"I love {hobby} cuz my life is {hobby}. Great!!! ☓D\""
        return hobby_text

    def work_text(self):
        name = self.name
        work = self.work
        work_text = f"{name} is {work}, work as {work}. He:\"Wow! What your work?\" {name}:\"I'm {work}. {work} is my calling af. :)\""
        return work_text

    def question_text(self):
        name = self.name
        question_text = f"He:\"Let's change the subject.\" {name}:\"Listen to this story.\""
        return question_text


class King():
    """English-speaking persona with a polite, formal register."""

    name = "レイテ"
    hobby = "ゲーム"
    work = "お嬢様"

    def name_text(self):
        name = self.name
        name_text = f"👣You are {name}, and called {name}. He:\"What your name?\" {name}:\"I'm {name}. Please call me {name}.\""
        return name_text

    def hobby_text(self):
        name = self.name
        hobby = self.hobby
        hobby_text = f"{name} is interested in {hobby}. {name} spends holiday {hobby}. He:\"What your hobby?\" {name}:\"My hobby is {hobby}. I enjoy doing {hobby}.\""
        return hobby_text

    def work_text(self):
        name = self.name
        work = self.work
        work_text = f"{name} is {work}, work as {work}. He:\"What your work?\" {name}:\"I {work} I live my life as {work}.\""
        return work_text

    def question_text(self):
        name = self.name
        question_text = f"He:Let's change the subject. {name}:\"Listen to this story.\""
        return question_text


class Robot():
    """Japanese-speaking persona with a neutral, polite tone."""

    name = "ネロ"
    hobby = "戦車競技"
    work = "ローマ皇帝"

    def name_text(self) -> str:
        name = self.name
        name_text = f"👣あなたは{name}で、名前は{name}といいます。{name}:「私は{name}です。{name}と呼んでください」"
        return name_text

    def hobby_text(self) -> str:
        name = self.name
        hobby = self.hobby
        hobby_text = f"趣味は{hobby}で、休日は{hobby}をして過ごしています。{name}:「私の趣味は{hobby}です。{hobby}をしていると楽しいです」"
        return hobby_text

    def work_text(self) -> str:
        name = self.name
        work = self.work
        work_text = f"{name}は{work}で、普段は{work}として生活しています。{name}:「私は{work}。{work}として生活しています」"
        return work_text

    def question_text(self) -> str:
        name = self.name
        question_text = f"人間:「話題を変えましょう」{name}:「そんなことより聞いてください」"
        return question_text


class Friend():
    """Japanese-speaking persona with a casual, friendly tone."""

    name = "ホメロス"
    hobby = "戦車競技"
    work = "ローマ皇帝"

    def name_text(self) -> str:
        name = self.name
        name_text = f"👣あなたは{name}で、名前は{name}といいます。{name}:「僕は{name}!{name}って呼んでね〜」"
        return name_text

    def hobby_text(self) -> str:
        name = self.name
        hobby = self.hobby
        hobby_text = f"趣味は{hobby}で、休日は{hobby}をして過ごしています。{name}:「好きなことは{hobby}だね。たいくつな時は{hobby}をしてるよ」"
        return hobby_text

    def work_text(self) -> str:
        name = self.name
        work = self.work
        work_text = f"{name}は{work}で、普段は{work}として生活しています。{name}:「僕は{work}。{work}として暮らしてるんだ!」"
        return work_text

    def question_text(self) -> str:
        name = self.name
        question_text = f"人間:「話題を変えましょう」{name}:「そんなことより聞いてよ〜」"
        return question_text


def generate(text):
    """Generate a short continuation (10-15 new tokens) of ``text``.

    Samples with top-k/top-p from the module-level ``model`` and returns
    the decoded output (prompt included).
    """
    token_ids = tokenizer.encode(
        text, add_special_tokens=False, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            token_ids.to(model.device),
            max_new_tokens=15,
            min_new_tokens=10,
            do_sample=True,
            top_k=500,
            top_p=0.95,
            # FIX: the kwarg is ``bad_words_ids`` (was ``bad_word_ids``,
            # which generate() silently ignored / rejects).
            # NOTE: removed ``padding="do_not_pad"`` — that is a tokenizer
            # argument, not a generate() argument, and newer transformers
            # versions raise on unknown generation kwargs.
            pad_token_id=tokenizer.bos_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            bad_words_ids=[[tokenizer.unk_token_id], [2070, 3], [5378]],
        )
    output = tokenizer.decode(output_ids.tolist()[0])
    return output


def makeMessage_2(text):
    """Generate one reply and return ``(message, updated_history)``.

    ``message`` is the model's reply up to (and stripped of) the closing
    quote; ``updated_history`` is the prompt extended with the reply and a
    fresh ``He:"`` turn marker for the next exchange.
    """
    output = generate(text)
    # Normalize full-width ASCII (！-～) to half-width so the prompt-echo
    # removal below matches what the tokenizer round-tripped.
    text = text.translate(str.maketrans(
        {chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))
    # Drop everything generated before this turn (the echoed prompt).
    output = output.replace(text, "")
    # Keep the reply up to and including the first closing double quote.
    outputList = []
    o_append = outputList.append
    for l in output:
        o_append(l)
        if l == "\"":
            break
    outputSentence = "".join(outputList)
    message = outputSentence.replace(".\"", "")
    historyList = []
    h_append = historyList.append
    h_append(text)
    h_append(outputSentence)
    h_append("He:\"")
    text = "".join(historyList)
    return message, text


def chat(character: int, name: str, hobby: str, work: str, history: str,
         input: str, state):
    """Produce one chat turn for the Gradio UI.

    Args:
        character: persona selector (1=Lady, 2=Friend, 3=Robot, 4=King;
            anything else falls back to King).
        name/hobby/work: user-supplied persona attributes.
        history: accumulated prompt; empty string on the first turn.
        input: the user's utterance.
        state: opaque Gradio session state, passed through unchanged.

    Returns:
        (message, updated_history, state)
    """
    lady, friend, robot, king = Lady(), Friend(), Robot(), King()
    persona_dic = {
        1: lady,
        2: friend,
        3: robot,
        4: king
    }
    start = process_time()
    # Renamed local from ``model`` to ``persona``: the old name shadowed the
    # module-level transformer model that generate() relies on.
    persona = persona_dic.get(character, King())
    persona.name, persona.hobby, persona.work = name, hobby, work
    text_list = [
        persona.name_text(),
        persona.hobby_text(),
        persona.work_text(),
        persona.question_text(),
        f"The following is a conversation between a friend and {name}. He:\"",
    ]
    base_text = "".join(text_list)
    if history == "":
        history = f"{base_text}"
    text = history
    text += input + f"\"{name}:\""
    m_start = process_time()
    result = makeMessage_2(text)
    m_end = process_time()
    print(f"生成{m_end-m_start}")
    message = result[0]
    print(message)
    # FIX: retry counter must be initialized OUTSIDE the loop — it was reset
    # to 0 every iteration, so the bail-out below could never fire.
    count = 0
    # FIX: removed the empty alternative (``〜||UNK``) from the pattern — an
    # empty branch matches every string, which forced the error path on
    # every single reply.
    while re.search("〇〇|○○|s>|^👣|^〜|UNK|@@", message):
        print("error")
        text = history
        input = "何か質問してください"
        text += input + f"」{name}:「"
        # FIX: was ``makeMessage`` — an undefined name (NameError).
        result = makeMessage_2(text)
        message = result[0]
        print(message)
        count += 1
        if count > 2:
            message = "話題を変えましょう"
            break
    text = result[1]
    end = process_time()
    print(end-start)
    return message, text, state


textbox = gr.Textbox()
historybox = gr.Textbox()
iface = gr.Interface(
    chat,
    ["number", "text", "text", "text", "text", textbox, "state"],
    ["text", historybox, "state"],
    css=".footer {display:none !important}",
    allow_flagging="never",
    title="Loyal-AI-Chat"
)
iface.launch(inline=True, height=800)