zRzRzRzRzRzRzR committed
Commit: 3526756
Parent: bcf026a

fix readme

Files changed (2):
  1. README.md +5 -2
  2. README_en.md +5 -2
README.md CHANGED
@@ -45,7 +45,10 @@ GLM-4-9B 是智谱 AI 推出的最新一代预训练模型 GLM-4 系列中的开
 
 ```python
 import torch
-from Transformers
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+)
 
 device = "cuda"
 
@@ -62,7 +65,7 @@ inputs = tokenizer.apply_chat_template([{"role": "user", "content": query}],
 
 inputs = inputs.to(device)
 model = AutoModelForCausalLM.from_pretrained(
-    "ZhipuAI/glm-4-9b-chat-1m",
+    "THUDM/glm-4-9b-chat-1m",
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
     trust_remote_code=True
README_en.md CHANGED
@@ -38,13 +38,16 @@ Use the transformers backend for inference:
 
 ```python
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+)
 
 device = "cuda"
 
 tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat-1m",trust_remote_code=True)
 
-query = "hello"
+query = "你好"
 
 inputs = tokenizer.apply_chat_template([{"role": "user", "content": query}],
                                        add_generation_prompt=True,
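
The fixed snippet only appears in fragments across these two diffs, so a complete runnable version helps verify the change. Below is a sketch assembled from the post-fix lines; everything past the truncated `apply_chat_template` argument list (the `tokenize`/`return_tensors`/`return_dict` arguments, the `.eval()` call, `gen_kwargs`, and the `generate`/decode steps) is not shown in this commit and is an assumption about how the snippet continues.

```python
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
)

device = "cuda"

# Corrected repo id from this commit: THUDM/glm-4-9b-chat-1m (was ZhipuAI/...).
tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat-1m", trust_remote_code=True)

query = "你好"

# Build chat-formatted input ids; the keyword arguments beyond
# add_generation_prompt are assumptions (the diff truncates here).
inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": query}],
    add_generation_prompt=True,
    tokenize=True,
    return_tensors="pt",
    return_dict=True,
)
inputs = inputs.to(device)

model = AutoModelForCausalLM.from_pretrained(
    "THUDM/glm-4-9b-chat-1m",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).to(device).eval()

# Generation settings are illustrative, not part of the diff.
gen_kwargs = {"max_new_tokens": 256, "do_sample": True, "top_k": 1}
with torch.no_grad():
    outputs = model.generate(**inputs, **gen_kwargs)
    # Strip the prompt tokens and decode only the model's reply.
    outputs = outputs[:, inputs["input_ids"].shape[1]:]
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```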