YeungNLP commited on
Commit
837c5aa
1 Parent(s): 0727884

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +151 -0
README.md ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 + 使用[Firefly](https://github.com/yangjianxin1/Firefly)项目微调baichuan-13b-base。训练数据约为一百万条多轮对话数据,包括项目分享的moss数据+2万条school math数据。
2
+
3
+ 训练loss:
4
 + ![firefly-baichuan-13b training loss](firefly-baichuan-13b-loss.jpg)
5
+
6
+ 更多详情见项目:[Firefly](https://github.com/yangjianxin1/Firefly)
7
+
8
+ 技术细节分享:[Firefly增强Baichuan-13B的多轮对话能力](https://mp.weixin.qq.com/s/djO8Tg3emmy6wzw_rTUlcw)
9
+
10
+ 单轮对话:
11
+ ```python
12
+ from transformers import AutoModelForCausalLM, AutoTokenizer
13
+ import torch
14
+ """
15
+ 单轮对话,不具有对话历史的记忆功能
16
+ """
17
+
18
+
19
+ def main():
20
+ model_name = 'YeungNLP/firefly-baichuan-13b'
21
+
22
+ max_new_tokens = 500
23
+ top_p = 0.9
24
+ temperature = 0.35
25
+ repetition_penalty = 1.0
26
+ device = 'cuda'
27
+ model = AutoModelForCausalLM.from_pretrained(
28
+ model_name,
29
+ trust_remote_code=True,
30
+ low_cpu_mem_usage=True,
31
+ torch_dtype=torch.float16,
32
+ device_map='auto'
33
+ ).to(device).eval()
34
+ tokenizer = AutoTokenizer.from_pretrained(
35
+ model_name,
36
+ trust_remote_code=True,
37
+ # llama不支持fast
38
+ use_fast=False if model.config.model_type == 'llama' else True
39
+ )
40
+ # QWenTokenizer比较特殊,pad_token_id、bos_token_id、eos_token_id均为None。eod_id对应的token为<|endoftext|>
41
+ if tokenizer.__class__.__name__ == 'QWenTokenizer':
42
+ tokenizer.pad_token_id = tokenizer.eod_id
43
+ tokenizer.bos_token_id = tokenizer.eod_id
44
+ tokenizer.eos_token_id = tokenizer.eod_id
45
+
46
+ text = input('User:')
47
+ while True:
48
+ text = text.strip()
49
+ # chatglm使用官方的数据组织格式
50
+ if model.config.model_type == 'chatglm':
51
+ text = '[Round 1]\n\n问:{}\n\n答:'.format(text)
52
+ input_ids = tokenizer(text, return_tensors="pt", add_special_tokens=False).input_ids.to(device)
53
+ # 为了兼容qwen-7b,因为其对eos_token进行tokenize,无法得到对应的eos_token_id
54
+ else:
55
+ input_ids = tokenizer(text, return_tensors="pt", add_special_tokens=False).input_ids.to(device)
56
+ bos_token_id = torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long).to(device)
57
+ eos_token_id = torch.tensor([[tokenizer.eos_token_id]], dtype=torch.long).to(device)
58
+ input_ids = torch.concat([bos_token_id, input_ids, eos_token_id], dim=1)
59
+ with torch.no_grad():
60
+ outputs = model.generate(
61
+ input_ids=input_ids, max_new_tokens=max_new_tokens, do_sample=True,
62
+ top_p=top_p, temperature=temperature, repetition_penalty=repetition_penalty,
63
+ eos_token_id=tokenizer.eos_token_id
64
+ )
65
+ outputs = outputs.tolist()[0][len(input_ids[0]):]
66
+ response = tokenizer.decode(outputs)
67
+ response = response.strip().replace(tokenizer.eos_token, "").strip()
68
+ print("Firefly:{}".format(response))
69
+ text = input('User:')
70
+
71
+
72
+ if __name__ == '__main__':
73
+ main()
74
+ ```
75
+
76
+
77
+ 多轮对话:
78
+ ```python
79
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

"""
Multi-turn chat demo: the dialogue history is accumulated as token ids and
truncated to the most recent `history_max_len` tokens before each generation.
"""


def main():
    model_name = 'YeungNLP/firefly-baichuan-13b'

    device = 'cuda'
    max_new_tokens = 500    # max tokens generated per turn
    history_max_len = 1000  # max number of history tokens the model sees
    top_p = 0.9
    temperature = 0.35
    repetition_penalty = 1.0

    # Load the model.
    # NOTE(review): with device_map='auto' the model is already dispatched to
    # the available device(s) by accelerate; calling .to(device) on a
    # dispatched model conflicts with accelerate's hooks (raises on recent
    # versions) and is redundant, so it is omitted here.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        trust_remote_code=True,
        low_cpu_mem_usage=True,
        torch_dtype=torch.float16,
        device_map='auto'
    ).eval()
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True,
        # llama tokenizers do not support the fast implementation
        use_fast=False if model.config.model_type == 'llama' else True
    )
    # QWenTokenizer is special: pad_token_id, bos_token_id and eos_token_id
    # are all None. eod_id maps to the <|endoftext|> token, so reuse it for
    # all three roles.
    if tokenizer.__class__.__name__ == 'QWenTokenizer':
        tokenizer.pad_token_id = tokenizer.eod_id
        tokenizer.bos_token_id = tokenizer.eod_id
        tokenizer.eos_token_id = tokenizer.eod_id

    # Accumulated token ids of the whole conversation. chatglm's official
    # format carries no leading bos, other models start from bos.
    if model.config.model_type != 'chatglm':
        history_token_ids = torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long)
    else:
        history_token_ids = torch.tensor([[]], dtype=torch.long)

    # Chat loop.
    utterance_id = 0  # current turn number, needed by chatglm's prompt format
    user_input = input('User:')
    while True:
        utterance_id += 1
        # chatglm uses its own official prompt format.
        if model.config.model_type == 'chatglm':
            user_input = '[Round {}]\n\n问:{}\n\n答:'.format(utterance_id, user_input)
            user_input_ids = tokenizer(user_input, return_tensors="pt", add_special_tokens=False).input_ids
        # Firefly prompt format: terminate the user turn with an explicit eos
        # id, concatenated manually for qwen-7b compatibility (its tokenizer
        # tokenizes eos_token as plain text, so eos_token_id cannot be
        # obtained otherwise).
        else:
            input_ids = tokenizer(user_input, return_tensors="pt", add_special_tokens=False).input_ids
            eos_token_id = torch.tensor([[tokenizer.eos_token_id]], dtype=torch.long)
            user_input_ids = torch.concat([input_ids, eos_token_id], dim=1)
        history_token_ids = torch.concat((history_token_ids, user_input_ids), dim=1)
        # Feed only the most recent tokens to bound the context length.
        model_input_ids = history_token_ids[:, -history_max_len:].to(device)
        with torch.no_grad():
            outputs = model.generate(
                input_ids=model_input_ids, max_new_tokens=max_new_tokens, do_sample=True, top_p=top_p,
                temperature=temperature, repetition_penalty=repetition_penalty, eos_token_id=tokenizer.eos_token_id
            )
        model_input_ids_len = model_input_ids.size(1)
        response_ids = outputs[:, model_input_ids_len:]
        # Append the model's reply to the history as well.
        history_token_ids = torch.concat((history_token_ids, response_ids.cpu()), dim=1)
        response = tokenizer.batch_decode(response_ids)
        print("Firefly:" + response[0].strip().replace(tokenizer.eos_token, ""))
        user_input = input('User:')


if __name__ == '__main__':
    main()
151
+ ```