---
license: apache-2.0
---
## Intro

- This 4-bit GPTQ model was converted from [Taiwan-LLaMa-v1.0 13b](https://huggingface.co/yentinglin/Taiwan-LLaMa-v1.0) with the [auto-gptq](https://github.com/PanQiWei/AutoGPTQ) package; a rough sketch of the conversion step is shown below.

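The conversion script itself is not included in this repo; the snippet below is only a minimal sketch of how such a conversion can be done with AutoGPTQ, assuming a small list of calibration texts (`calibration_texts`) and default 4-bit settings. It is illustrative, not the exact script used to produce this checkpoint.

```python
# Hypothetical conversion sketch (not the exact script used for this checkpoint).
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

base_model = "yentinglin/Taiwan-LLaMa-v1.0"
calibration_texts = ["..."]  # assumed: a few representative texts for GPTQ calibration

tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=True)
examples = [tokenizer(text) for text in calibration_texts]  # tokenized calibration samples

quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)
model = AutoGPTQForCausalLM.from_pretrained(base_model, quantize_config)
model.quantize(examples)  # run GPTQ calibration
model.save_quantized("Taiwan-LLaMa-v1.0-4bits-GPTQ", use_safetensors=True)
```
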
## How to use the GPTQ model in Python

- Install the AutoGPTQ package: `pip install auto-gptq`
- Example code:
```python
from threading import Thread

from transformers import AutoTokenizer, TextStreamer, TextIteratorStreamer
from auto_gptq import AutoGPTQForCausalLM


class TaiwanLLaMaGPTQ:
    def __init__(self, model_dir):
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=True)
        self.model = AutoGPTQForCausalLM.from_quantized(model_dir,
                                                        trust_remote_code=True,
                                                        use_safetensors=True,
                                                        device_map="auto",
                                                        use_triton=False,
                                                        strict=False)
        self.chat_history = []
        self.system_prompt = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""

        # Streamer for blocking generation (prints tokens as they are produced).
        self.streamer = TextStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
        # Streamer for threaded generation (yields text chunks to the caller).
        self.thread_streamer = TextIteratorStreamer(self.tokenizer, skip_special_tokens=True)

    def get_prompt(self, message: str, chat_history: list[tuple[str, str]]) -> str:
        # Build a Llama-2-style chat prompt from the system prompt and the history.
        texts = [f'[INST] <<SYS>>\n{self.system_prompt}\n<</SYS>>\n\n']
        for user_input, response in chat_history:
            texts.append(f'{user_input.strip()} [/INST] {response.strip()} </s><s> [INST] ')
        texts.append(f'{message.strip()} [/INST]')
        return ''.join(texts)

    def generate(self, message: str):
        prompt = self.get_prompt(message, self.chat_history)
        tokens = self.tokenizer(prompt, return_tensors='pt').input_ids
        generate_ids = self.model.generate(input_ids=tokens.cuda(), max_new_tokens=4096, streamer=self.streamer)
        # Decode only the newly generated tokens, dropping the prompt and the trailing EOS token.
        output = self.tokenizer.decode(generate_ids[0, len(tokens[0]):-1]).strip()
        self.chat_history.append([message, output])
        return output

    def thread_generate(self, message: str):
        prompt = self.get_prompt(message, self.chat_history)
        inputs = self.tokenizer(prompt, return_tensors="pt")

        generation_kwargs = dict(
            inputs=inputs.input_ids.cuda(),
            attention_mask=inputs.attention_mask.cuda(),
            temperature=0.1,
            max_new_tokens=1024,
            streamer=self.thread_streamer,
        )

        # Run generation on a separate thread to enable response streaming.
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
        thread.start()
        for new_text in self.thread_streamer:
            yield new_text

        thread.join()


inferencer = TaiwanLLaMaGPTQ("weiren119/Taiwan-LLaMa-v1.0-4bits-GPTQ")

while True:
    s = input("User: ")
    if s != '':
        print('Answer:')
        print(inferencer.generate(s))
        print('-' * 80)
```
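
The loop above uses the blocking `generate` method. `thread_generate` streams the reply instead; a minimal usage sketch (not part of the original example) follows. Note that, as written, the `TextIteratorStreamer` also streams the prompt back because `skip_prompt` is left at its default, and `thread_generate` does not update `chat_history`.

```python
# Hypothetical streaming usage of thread_generate (illustrative only).
inferencer = TaiwanLLaMaGPTQ("weiren119/Taiwan-LLaMa-v1.0-4bits-GPTQ")

for chunk in inferencer.thread_generate("Tell me about night markets in Taiwan."):
    print(chunk, end="", flush=True)  # print partial text as it arrives
print()
```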