codefuse-admin committed on
Commit
e48713c
1 Parent(s): ee36b62

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -10
README.md CHANGED
@@ -113,19 +113,17 @@ When applying inference, you always make your input string end with "\<s\>bot" t
113
 
114
 
115
  ```bash
116
- pip install transformers modelscope cpm_kernels -U
117
  pip install -r requirements.txt
118
  ```
119
 
120
  ```python
121
  import torch
122
- from modelscope import (
123
  AutoTokenizer,
124
  AutoModel,
125
- snapshot_download
126
  )
127
- model_dir = snapshot_download('codefuse-ai/CodeFuse-CodeGeeX2-6B',revision = 'v1.0.0')
128
- tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
129
  tokenizer.padding_side = "left"
130
  # try 4bit loading if cuda memory not enough
131
  model = AutoModel.from_pretrained(model_dir,
@@ -261,19 +259,17 @@ CodeFuse-CodeGeeX2-6B 是一个通过LoRA对基座模型CodeGeeX2进行多代
261
 
262
 
263
  ```bash
264
- pip install transformers modelscope cpm_kernels -U
265
  pip install -r requirements.txt
266
  ```
267
 
268
  ```python
269
  import torch
270
- from modelscope import (
271
  AutoTokenizer,
272
  AutoModel,
273
- snapshot_download
274
  )
275
- model_dir = snapshot_download('codefuse-ai/CodeFuse-CodeGeeX2-6B',revision = 'v1.0.0')
276
- tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
277
  tokenizer.padding_side = "left"
278
  # try 4bit loading if cuda memory not enough
279
  model = AutoModel.from_pretrained(model_dir,
 
113
 
114
 
115
  ```bash
116
+ pip install transformers cpm_kernels -U
117
  pip install -r requirements.txt
118
  ```
119
 
120
  ```python
121
  import torch
122
+ from transformers import (
123
  AutoTokenizer,
124
  AutoModel,
 
125
  )
126
+ tokenizer = AutoTokenizer.from_pretrained('codefuse-ai/CodeFuse-CodeGeeX2-6B', trust_remote_code=True)
 
127
  tokenizer.padding_side = "left"
128
  # try 4bit loading if cuda memory not enough
129
  model = AutoModel.from_pretrained(model_dir,
 
259
 
260
 
261
  ```bash
262
+ pip install transformers cpm_kernels -U
263
  pip install -r requirements.txt
264
  ```
265
 
266
  ```python
267
  import torch
268
+ from transformers import (
269
  AutoTokenizer,
270
  AutoModel,
 
271
  )
272
+ tokenizer = AutoTokenizer.from_pretrained('codefuse-ai/CodeFuse-CodeGeeX2-6B', trust_remote_code=True)
 
273
  tokenizer.padding_side = "left"
274
  # try 4bit loading if cuda memory not enough
275
  model = AutoModel.from_pretrained(model_dir,