---
license: apache-2.0
---

### This model is fine-tuned from Mistral-7B-Instruct-v0.2 on 90% Chinese and 10% English data

GitHub: [Web-UI](https://github.com/moseshu/llama2-chat/tree/main/webui)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/62f4c7172f63f904a0c61ba3/JIeyxhTm9_PNzXyU7wQVd.png)

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub id or local path of this fine-tuned checkpoint
# (the base model is Mistral-7B-Instruct-v0.2)
model_id = "Mistral-7B-Instruct-v0.2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Llama-2-style instruction template used during fine-tuning
prompt = "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Help human as much as you can.\n<</SYS>>\n\n{instruction} [/INST]"
text = prompt.format_map({"instruction": "你好,最近干嘛呢"})


def predict(content_prompt):
    inputs = tokenizer(content_prompt, return_tensors="pt", add_special_tokens=True)
    input_ids = inputs["input_ids"].to(model.device)
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=2048,
            top_p=0.9,
            num_beams=1,
            do_sample=True,
            repetition_penalty=1.0,
            eos_token_id=tokenizer.eos_token_id,
            # the Mistral tokenizer ships without a pad token, so fall back to EOS
            pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s, skip_special_tokens=True)
    # keep only the assistant reply, which follows the final [/INST] tag
    return output.split("[/INST]")[-1].strip()


predict(text)
# Output: 你好!作为一个大型语言模型,我一直在学习和提高自己的能力。最近,我一直在努力学习新知识、
# 改进算法,以便更好地回答用户的问题并提供帮助。同时,我也会定期接受人工智能专家的指导和评估,
# 以确保我的表现不断提升。希望这些信息对你有所帮助!
# (English: "Hello! As a large language model, I am always learning and improving my abilities.
# Recently I have been working hard to learn new things and refine my algorithms so I can answer
# users' questions and help them better. I also receive regular guidance and evaluation from AI
# experts to make sure my performance keeps improving. I hope this information helps!")
```
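
For interactive use it can be nicer to stream tokens as they are generated rather than waiting for the full completion. Below is a minimal sketch of a streaming variant, assuming the `model`, `tokenizer`, and `text` defined in the snippet above; it uses `transformers.TextStreamer`, which prints decoded tokens to stdout as they arrive, and the wrapper name `predict_stream` is illustrative, not part of the original code.

```python
from transformers import TextStreamer


def predict_stream(content_prompt):
    # print tokens to stdout as they are generated; skip echoing the prompt itself
    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    inputs = tokenizer(content_prompt, return_tensors="pt", add_special_tokens=True)
    with torch.no_grad():
        model.generate(
            input_ids=inputs["input_ids"].to(model.device),
            streamer=streamer,
            max_new_tokens=2048,
            top_p=0.9,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
        )


predict_stream(text)
```

If the output needs to be consumed programmatically (for example in a web backend) rather than printed, `transformers.TextIteratorStreamer` offers the same behavior as an iterator that can be read from another thread.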