naughtondale committed
Commit 3a7710e
1 Parent(s): 67b07ff

Create model.py

Files changed (1)
model.py +70 -0
model.py ADDED
@@ -0,0 +1,70 @@
+ from threading import Thread
+ from typing import Iterator
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+ model_id = 'Open-Orca/OpenOrca-Preview1-13B'
+
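+ # Load the weights in half precision when a GPU is available; otherwise
+ # fall back to model = None so the tokenizer-only helpers below still work.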
+ if torch.cuda.is_available():
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         device_map='auto'
+     )
+ else:
+     model = None
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+
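+ # Build a Llama-2-style chat prompt: one <<SYS>> system block, then
+ # alternating [INST] ... [/INST] turns drawn from the chat history.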
+ def get_prompt(message: str, chat_history: list[tuple[str, str]],
+                system_prompt: str) -> str:
+     texts = [f'<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
+     # The first user input is _not_ stripped
+     do_strip = False
+     for user_input, response in chat_history:
+         user_input = user_input.strip() if do_strip else user_input
+         do_strip = True
+         texts.append(f'{user_input} [/INST] {response.strip()} </s><s>[INST] ')
+     message = message.strip() if do_strip else message
+     texts.append(f'{message} [/INST]')
+     return ''.join(texts)
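+
+ # e.g. get_prompt('Hi', [], 'Be concise') returns
+ # '<s>[INST] <<SYS>>\nBe concise\n<</SYS>>\n\nHi [/INST]'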
+
+
+ def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
+     prompt = get_prompt(message, chat_history, system_prompt)
+     input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
+     return input_ids.shape[-1]
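+
+ # Note: get_input_token_length needs only the tokenizer, so it works even
+ # when the model was not loaded (CPU-only hosts).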
+
+
+ def run(message: str,
+         chat_history: list[tuple[str, str]],
+         system_prompt: str,
+         max_new_tokens: int = 1024,
+         temperature: float = 0.8,
+         top_p: float = 0.95,
+         top_k: int = 50) -> Iterator[str]:
+     prompt = get_prompt(message, chat_history, system_prompt)
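+     # Generation assumes a CUDA device: the inputs are moved to 'cuda', and
+     # model is None when no GPU was found at import time.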
+     inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
+
+     streamer = TextIteratorStreamer(tokenizer,
+                                     timeout=10.,
+                                     skip_prompt=True,
+                                     skip_special_tokens=True)
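+     # skip_prompt drops the echoed input tokens from the stream; the timeout
+     # keeps the consuming loop from blocking forever if generation stalls.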
+     generate_kwargs = dict(
+         inputs,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=1,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
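+     # model.generate runs on a background thread; iterating the streamer
+     # yields decoded text fragments as they arrive.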
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield ''.join(outputs)
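
A minimal sketch of how a caller might drive this streaming generator (the
message and system prompt are illustrative, not part of the commit):

    history: list[tuple[str, str]] = []
    # Each value yielded by run() is the full response accumulated so far.
    for partial_response in run('What is OpenOrca?', history,
                                'You are a helpful assistant.'):
        print(partial_response)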