Tonic committed on
Commit
ddd4fed
1 Parent(s): 381bab8

Update app.py

Files changed (1)
  1. app.py +67 -105
app.py CHANGED
@@ -1,117 +1,79 @@
- import os
- import math
- import transformers
- from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch
  import gradio as gr
  import sentencepiece
- import gc
-
- title = "# Welcome to 🙋🏻‍♂️Tonic's🌷Tulu Chat!"
- description = """[allenai/tulu-2-dpo-7b](https://huggingface.co/allenai/tulu-2-dpo-7b) and larger Tulu-2 models are Instruct Llama Finetunes using the [mistralai/Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) recipe. You can use [allenai/tulu-2-13b](https://huggingface.co/allenai/tulu-2-13b) here via API using Gradio by scrolling down and clicking Use 'Via API' or privately by [cloning this space on huggingface](https://huggingface.co/spaces/Tonic1/TuluDemo?duplicate=true) See also the large model here : [allenai/tulu-2-dpo-70b](https://huggingface.co/allenai/tulu-2-dpo-70b) . [Join my active builders' server on discord](https://discord.gg/VqTxc76K3u). Let's build together!. [Add this Space as a discord bot to your server by clicking this link](https://discord.com/oauth2/authorize?client_id=1176628808212828231&scope=bot+applications.commands&permissions=326417525824). Big thanks to 🤗Huggingface Organisation for the🫂Community Grant"""
-
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- model_name = "allenai/tulu-2-dpo-13b"
- tokenizer = AutoTokenizer.from_pretrained("allenai/tulu-2-dpo-13b")
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
-
- # bos_token_id = 1
- # eos_token_id = 2
- # tokenizer.bos_token_id = bos_token_id
- # tokenizer.eos_token_id = eos_token_id
- # model.config.bos_token_id = bos_token_id
- # model.config.eos_token_id = eos_token_id
- # if tokenizer.pad_token is None:
- #     tokenizer.pad_token = tokenizer.eos_token
- #     model.config.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
-
- class TuluChatBot:
-     def __init__(self, model, tokenizer, system_message="You are 🌷Tulu, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
-         self.model = model
-         self.tokenizer = tokenizer
-         self.system_message = system_message
-
-     def set_system_message(self, new_system_message):
-         self.system_message = new_system_message
-
-     def format_prompt(self, user_message):
-         prompt = f"<|assistant|>\n{self.system_message}\n<|user|>{user_message}\n<|assistant|>\n"
-         return prompt
-
-     def Tulu(self, user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample):
-         try:
-             prompt = self.format_prompt(user_message)
-             inputs = self.tokenizer(prompt, return_tensors='pt', add_special_tokens=True)
-             input_ids = inputs["input_ids"].to(self.model.device)
-             attention_mask = inputs["attention_mask"].to(self.model.device)
-
-
-             output_ids = self.model.generate(
-                 input_ids,
-                 attention_mask=attention_mask,
-                 max_length=input_ids.shape[1] + max_new_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 repetition_penalty=repetition_penalty,
-                 do_sample=do_sample
-             )
-
-             response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
-             response = response.strip()
-             response = response.split("<|assistant|>\n")[-1]
-             return response
-         finally:
-             del input_ids, attention_mask, output_ids
-             gc.collect()
-             torch.cuda.empty_cache()
-
- def gradio_Tulu(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
-     Tulu_bot.set_system_message(system_message)
-     if not do_sample:
-         max_length = 780
-         temperature = 0.9
-         top_p = 0.9
-         repetition_penalty = 0.9
-     response = Tulu_bot.Tulu(user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample)
-     return response
-
- # Initialize TuluChatBot
- Tulu_bot = TuluChatBot(model, tokenizer)

- # Gradio interface function
- def gradio_Tulu(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
-     Tulu_bot.set_system_message(system_message)
-     response = Tulu_bot.Tulu(user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample)
-     return response


- with gr.Blocks(theme = "ParityError/Anime") as demo:
-     gr.Markdown(title)
-     gr.Markdown(description)
-     with gr.Row():
-         system_message = gr.Textbox(label="Optional 🌷Tulu Assistant Message", lines=2)
-         user_message = gr.Textbox(label="Your Message", lines=3)
-     with gr.Row():
-         do_sample = gr.Checkbox(label="Advanced", value=True)
-
-     with gr.Accordion("Advanced Settings", open=lambda do_sample: do_sample):
-         with gr.Row():
-             max_new_tokens = gr.Slider(label="Max new tokens", value=250, minimum=20, maximum=450, step=1)
-             temperature = gr.Slider(label="Temperature", value=0.3, minimum=0.1, maximum=1.0, step=0.1)
-             top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99, step=0.05)
-             repetition_penalty = gr.Slider(label="Repetition penalty", value=0.9, minimum=0.05, maximum=1.0, step=0.05)

-     submit_button = gr.Button("Submit")
-     output_text = gr.Textbox(label="🌷Tulu Response")

-     def process(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
-         return gradio_Tulu(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample)

      submit_button.click(
-         process,
-         inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample],
-         outputs=output_text
      )
-
  demo.launch()
 
+ from transformers import AutoTokenizer, AutoModelForCausalLM
  import torch
+ import os
  import gradio as gr
  import sentencepiece
+ from tokenization_yi import YiTokenizer
+
+
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'
+ model_id = "01-ai/Yi-6B-200K"
+ tokenizer_path = "./"
+ eos_token_id = 7
+
+ DESCRIPTION = """
+ # 👋🏻Welcome to 🙋🏻‍♂️Tonic's🧑🏻‍🚀YI-200K🚀
+ You can use this Space to test out the current model [01-ai/Yi-6B-200k](https://huggingface.co/01-ai/Yi-6B-200k), a "🦙Llamified" version based on [01-ai/Yi-34B](https://huggingface.co/01-ai/Yi-34B).
+ You can also use 🧑🏻‍🚀YI-200K🚀 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/YiTonic?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
+ Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builders'🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9). On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer). On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
+ """
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
+ # tokenizer = YiTokenizer.from_pretrained(tokenizer_path)
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
+ tokenizer.eos_token_id = eos_token_id
+ model.config.eos_token_id = eos_token_id
+
+ def format_prompt(user_message, system_message="You are YiTonic, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and follow ethical guidelines and promote positive behavior."):
+     prompt = f"<|im_start|>assistant\n{system_message}<|im_end|>\n<|im_start|>\nuser\n{user_message}<|im_end|>\nassistant\n"
+     return prompt
+
+ def predict(message, system_message, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=40, model_max_length=32000, do_sample=False):
+     formatted_prompt = format_prompt(message, system_message)
+
+     input_ids = tokenizer.encode(formatted_prompt, return_tensors='pt')
+     input_ids = input_ids.to(model.device)
+
+     response_ids = model.generate(
+         input_ids,
+         max_length=max_new_tokens + input_ids.shape[1],
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         no_repeat_ngram_size=9,
+         pad_token_id=tokenizer.eos_token_id,
+         do_sample=do_sample
+     )
+
+     response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+     truncate_str = "<|im_end|>"
+     if truncate_str and truncate_str in response:
+         response = response.split(truncate_str)[0]

+     return [("bot", response)]
+ with gr.Blocks(theme='ParityError/Anime') as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Group():
+         textbox = gr.Textbox(placeholder='Your Message Here', label='Your Message', lines=2)
+         system_prompt = gr.Textbox(placeholder='Provide a System Prompt In The First Person', label='System Prompt', lines=2, value="You are YiTonic, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.")

+     with gr.Group():
+         chatbot = gr.Chatbot(label='TonicYi-6B-200K-🧠🤯')

+     with gr.Group():
+         submit_button = gr.Button('Submit', variant='primary')

+     with gr.Accordion(label='Advanced options', open=False):
+         max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=55000, step=1, value=4056)
+         temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=1.2)
+         top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+         top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=40)
+         do_sample_checkbox = gr.Checkbox(label='Disable for faster inference', value=True)

      submit_button.click(
+         fn=predict,
+         inputs=[textbox, system_prompt, max_new_tokens, temperature, top_p, top_k, do_sample_checkbox],
+         outputs=chatbot
      )
+
  demo.launch()
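
As the descriptions above note, a Space like this can also be driven programmatically over the Gradio API. The sketch below is illustrative only and not part of the commit: it assumes the Space is published as Tonic1/YiTonic (the id used in the duplicate link above) and that the single submit_button.click handler is exposed as the default endpoint; the seven positional arguments mirror the handler's inputs list.

    # Hypothetical usage sketch (not part of this commit): calling the updated
    # Space via the Gradio client. Space id and endpoint index are assumptions.
    from gradio_client import Client

    client = Client("Tonic1/YiTonic")  # assumed Space id, from the duplicate link

    # Positional args mirror the click handler's inputs:
    # [textbox, system_prompt, max_new_tokens, temperature, top_p, top_k, do_sample_checkbox]
    result = client.predict(
        "What can a 200K-token context window be used for?",   # user message
        "You are YiTonic, a cautious and helpful assistant.",  # system prompt
        512,   # max new tokens
        1.2,   # temperature
        0.9,   # top-p
        40,    # top-k
        True,  # sampling checkbox
        fn_index=0,  # the app's single click event
    )
    print(result)

The return value corresponds to outputs=chatbot, i.e. the list of message pairs that predict produces.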