stefiane Zhang (ๅผ ๅฎถๅŽ)-ๆตชๆฝฎไฟกๆฏ committed on
Commit
5a05232
โ€ข
1 Parent(s): a8b57a5

Add application file

Browse files
Files changed (1) hide show
  1. app.py +66 -0
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
4
+ from threading import Thread
5
+
6
+ import torch, transformers
7
+ import sys, os
8
+ sys.path.append(
9
+ os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
10
+ from transformers import AutoModelForCausalLM,AutoTokenizer,LlamaTokenizer
11
+
12
# --- One-time model/tokenizer setup (runs at import time) ---
# FIX: corrected "Creat" typos in the user-facing status messages.
print("Create tokenizer...")
# Yuan2 uses a Llama-style tokenizer; '<eod>' is its end-of-dialogue token.
tokenizer = LlamaTokenizer.from_pretrained(
    'IEITYuan/Yuan2-2B-hf',
    add_eos_token=False,
    add_bos_token=False,
    eos_token='<eod>',
)
# Register Yuan2's extra special tokens so each is kept as a single token id.
tokenizer.add_tokens(
    ['<sep>', '<pad>', '<mask>', '<predict>', '<FIM_SUFFIX>', '<FIM_PREFIX>',
     '<FIM_MIDDLE>', '<commit_before>', '<commit_msg>', '<commit_after>',
     '<jupyter_start>', '<jupyter_text>', '<jupyter_code>', '<jupyter_output>',
     '<empty_output>'],
    special_tokens=True,
)

print("Create model...")
# device_map='auto' lets accelerate choose device placement; bfloat16 halves
# the memory footprint versus float32.
model = AutoModelForCausalLM.from_pretrained(
    'IEITYuan/Yuan2-2B-hf',
    device_map='auto',
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
18
+
19
+
20
# Custom stopping criterion used by model.generate().
class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the last emitted token is a stop id."""

    # Token ids where generation should stop.
    # NOTE(review): assumes id 2 is the model's stop token — confirm against
    # the tokenizer's vocabulary.
    _STOP_IDS = (2,)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        last_token = input_ids[0][-1]
        for candidate in self._STOP_IDS:
            if last_token == candidate:
                return True
        return False
28
+
29
+
30
# Function to generate model predictions, streamed token by token.
def predict(message, history):
    """Yield the assistant's reply incrementally for the Gradio ChatInterface.

    Args:
        message: the user's newest message (str).
        history: list of [user_text, assistant_text] pairs from earlier turns,
            as supplied by gr.ChatInterface.

    Yields:
        str: the partial assistant reply, growing as tokens arrive.
    """
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Flatten the chat history into the prompt format expected by the model.
    messages = "</s>".join(["</s>".join(["\n<|user|>:" + item[0], "\n<|assistant|>:" + item[1]])
                            for item in history_transformer_format])
    # BUGFIX: the original called .to(device) with `device` never defined,
    # raising NameError on the first chat turn. Use the device the model was
    # actually placed on by device_map='auto'.
    model_inputs = tokenizer([messages], return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    # Run generation on a worker thread so this generator can consume the
    # streamer concurrently.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '</s>' in partial_message:  # turn separator reached: stop streaming
            break
        yield partial_message
59
+
60
+
61
# Build the Gradio chat UI backed by predict() and start serving it.
chat_ui = gr.ChatInterface(
    fn=predict,
    title="Yuan2_2b_chatBot",
    description="่ฏทๆ้—ฎ",
    examples=['่ฏท้—ฎ็›ฎๅ‰ๆœ€ๅ…ˆ่ฟ›็š„ๆœบๅ™จๅญฆไน ็ฎ—ๆณ•ๆœ‰ๅ“ชไบ›๏ผŸ'],
)
chat_ui.launch()