Bellamy66 committed
Commit 39ecb70
1 Parent(s): df061d8

Upload model.py

Files changed (1)
  1. model.py +75 -0
model.py ADDED
@@ -0,0 +1,75 @@
+from threading import Thread
+from typing import Iterator
+
+import torch
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+model_id = 'codellama/CodeLlama-13b-Instruct-hf'
+
+if torch.cuda.is_available():
+    config = AutoConfig.from_pretrained(model_id)
+    config.pretraining_tp = 1
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        config=config,
+        torch_dtype=torch.float16,
+        load_in_4bit=True,
+        device_map='auto',
+        use_safetensors=False,
+    )
+else:
+    model = None
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+
+def get_prompt(message: str, chat_history: list[tuple[str, str]],
+               system_prompt: str) -> str:
+    texts = [f'<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
+    # The first user input is _not_ stripped
+    do_strip = False
+    for user_input, response in chat_history:
+        user_input = user_input.strip() if do_strip else user_input
+        do_strip = True
+        texts.append(f'{user_input} [/INST] {response.strip()} </s><s>[INST] ')
+    message = message.strip() if do_strip else message
+    texts.append(f'{message} [/INST]')
+    return ''.join(texts)
+
+
+def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
+    prompt = get_prompt(message, chat_history, system_prompt)
+    input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
+    return input_ids.shape[-1]
+
+
+def run(message: str,
+        chat_history: list[tuple[str, str]],
+        system_prompt: str,
+        max_new_tokens: int = 1024,
+        temperature: float = 0.1,
+        top_p: float = 0.9,
+        top_k: int = 50) -> Iterator[str]:
+    prompt = get_prompt(message, chat_history, system_prompt)
+    inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
+
+    streamer = TextIteratorStreamer(tokenizer,
+                                    timeout=10.,
+                                    skip_prompt=True,
+                                    skip_special_tokens=True)
+    generate_kwargs = dict(
+        inputs,
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield ''.join(outputs)
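
For reference, get_prompt assembles the Llama-2-style chat template that CodeLlama-Instruct expects: a <<SYS>> block holding the system prompt, then alternating [INST] ... [/INST] turns separated by </s><s>. A minimal sketch of the string it builds for a one-turn history (the sample messages below are invented for illustration, not taken from this repo):

    # Hypothetical inputs, purely for illustration.
    history = [('What does ls -la do?', 'It lists all files, including hidden ones, in long format.')]
    prompt = get_prompt('And ls -lah?', history, 'You are a helpful assistant.')
    print(prompt)
    # <s>[INST] <<SYS>>
    # You are a helpful assistant.
    # <</SYS>>
    #
    # What does ls -la do? [/INST] It lists all files, including hidden ones, in long format. </s><s>[INST] And ls -lah? [/INST]

Note that only the first user input keeps its surrounding whitespace (do_strip starts out False), exactly as the in-code comment points out.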
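
And a hedged sketch of how a caller might drive run() and get_input_token_length() together; the surrounding script and the 4000-token cap are assumptions for illustration, not something this commit shows. Each value run() yields is the cumulative response so far rather than a delta, so a UI can simply re-render the latest string:

    MAX_INPUT_TOKEN_LENGTH = 4000  # hypothetical cap; the real limit would live in the calling app

    message = 'Write a Python one-liner that reverses a string.'
    history: list[tuple[str, str]] = []
    system_prompt = 'You are a helpful coding assistant.'

    # Refuse prompts that would overflow the model's context window.
    if get_input_token_length(message, history, system_prompt) > MAX_INPUT_TOKEN_LENGTH:
        raise ValueError('Input is too long; trim the chat history.')

    # run() streams cumulative text; the final item is the complete answer.
    response = ''
    for response in run(message, history, system_prompt, max_new_tokens=256):
        pass
    print(response)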