GuoPD commited on
Commit
06f9b15
1 Parent(s): ed70bed

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -3
README.md CHANGED
@@ -30,6 +30,10 @@ tasks:
30
  - [📊 Benchmark评估/Benchmark Evaluation](#Benchmark)
31
  - [📜 声明与协议/Terms and Conditions](#Terms)
32
 
 
 
 
 
33
 
34
  # <span id="Introduction">模型介绍/Introduction</span>
35
 
@@ -59,9 +63,9 @@ In the Baichuan 2 series models, we have utilized the new feature `F.scaled_dot_
59
  import torch
60
  from transformers import AutoModelForCausalLM, AutoTokenizer
61
  from transformers.generation.utils import GenerationConfig
62
- tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan2-13B-Chat", use_fast=False, trust_remote_code=True)
63
- model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan2-13B-Chat", device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
64
- model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan2-13B-Chat")
65
  messages = []
66
  messages.append({"role": "user", "content": "解释一下“温故而知新”"})
67
  response = model.chat(tokenizer, messages)
 
30
  - [📊 Benchmark评估/Benchmark Evaluation](#Benchmark)
31
  - [📜 声明与协议/Terms and Conditions](#Terms)
32
 
33
+ # 更新
34
+ [2023.12.29] 🎉🎉🎉 我们发布了 **[Baichuan2-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat) v2** 版本。其中:
35
+ - 大幅提升了模型的综合能力,特别是数学和逻辑推理、复杂指令跟随能力。
36
+ - 模型处理长度从 4096 提升至 8192。
37
 
38
  # <span id="Introduction">模型介绍/Introduction</span>
39
 
 
63
  import torch
64
  from transformers import AutoModelForCausalLM, AutoTokenizer
65
  from transformers.generation.utils import GenerationConfig
66
+ tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan2-13B-Chat", revision="v2.0", use_fast=False, trust_remote_code=True)
67
+ model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan2-13B-Chat", revision="v2.0", device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
68
+ model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan2-13B-Chat", revision="v2.0")
69
  messages = []
70
  messages.append({"role": "user", "content": "解释一下“温故而知新”"})
71
  response = model.chat(tokenizer, messages)