Ubuntu committed on
Commit: cbc9aaf
1 Parent(s): 368f865
Files changed (2)
  1. README.md +7 -7
  2. README_en.md +1 -16
README.md CHANGED
@@ -49,14 +49,14 @@ GLM-4V-9B. **GLM-4V-9B** supports bilingual (Chinese-English) multi
 
 GLM-4-9B-Chat and Llama-3-8B-Instruct were evaluated on six multilingual datasets; the results and the languages selected for each dataset are listed in the table below.
 
- | Dataset | Llama-3-8B-Instruct | GLM-4-9B-Chat | Languages
+ | Dataset | Llama-3-8B-Instruct | GLM-4-9B-Chat | Languages |
 |:------------|:-------------------:|:-------------:|:----------------------------------------------------------------------------------------------:|
- | M-MMLU | 49.6 | 56.6 | all
- | FLORES | 25.0 | 28.8 | ru, es, de, fr, it, pt, pl, ja, nl, ar, tr, cs, vi, fa, hu, el, ro, sv, uk, fi, ko, da, bg, no
- | MGSM | 54.0 | 65.3 | zh, en, bn, de, es, fr, ja, ru, sw, te, th
- | XWinograd | 61.7 | 73.1 | zh, en, fr, jp, ru, pt
- | XStoryCloze | 84.7 | 90.7 | zh, en, ar, es, eu, hi, id, my, ru, sw, te
- | XCOPA | 73.3 | 80.1 | zh, et, ht, id, it, qu, sw, ta, th, tr, vi
+ | M-MMLU | 49.6 | 56.6 | all |
+ | FLORES | 25.0 | 28.8 | ru, es, de, fr, it, pt, pl, ja, nl, ar, tr, cs, vi, fa, hu, el, ro, sv, uk, fi, ko, da, bg, no |
+ | MGSM | 54.0 | 65.3 | zh, en, bn, de, es, fr, ja, ru, sw, te, th |
+ | XWinograd | 61.7 | 73.1 | zh, en, fr, jp, ru, pt |
+ | XStoryCloze | 84.7 | 90.7 | zh, en, ar, es, eu, hi, id, my, ru, sw, te |
+ | XCOPA | 73.3 | 80.1 | zh, et, ht, id, it, qu, sw, ta, th, tr, vi |
 
 ### Tool Calling Capability
 
README_en.md CHANGED
@@ -1,18 +1,3 @@
- ---
- license: other
- license_name: glm-4
- license_link: https://huggingface.co/THUDM/glm-4-9b-chat/blob/main/LICENSE
- 
- language:
- - zh
- - en
- tags:
- - glm
- - chatglm
- - thudm
- inference: false
- ---
- 
 # GLM-4-9B-Chat
 
 ## Model Introduction
@@ -132,7 +117,7 @@ from vllm import LLM, SamplingParams
 # If you encounter OOM, it is recommended to reduce max_model_len or increase tp_size
 max_model_len, tp_size = 131072, 1
 model_name = "THUDM/glm-4-9b-chat"
- prompt = [{"role": "user", "content": "你好"}]
+ prompt = [{"role": "user", "content": "hello"}]
 
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 llm = LLM(
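
The second hunk only swaps the prompt string in the README's vLLM example, and the diff shows that example truncated at `llm = LLM(`. For orientation, here is a minimal sketch of how such a snippet is commonly completed; the `LLM(...)` keyword arguments, the sampling settings, and the `apply_chat_template` call below are illustrative assumptions, not the README's exact code.

```python
# Minimal sketch of a complete vLLM chat call for glm-4-9b-chat.
# The LLM() arguments and sampling settings are assumptions for illustration,
# not taken from the README shown in this diff.
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# If you encounter OOM, it is recommended to reduce max_model_len or increase tp_size
max_model_len, tp_size = 131072, 1
model_name = "THUDM/glm-4-9b-chat"
prompt = [{"role": "user", "content": "hello"}]

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
llm = LLM(
    model=model_name,
    tensor_parallel_size=tp_size,
    max_model_len=max_model_len,
    trust_remote_code=True,  # needed so the GLM-4 tokenizer/chat template can load
)
sampling_params = SamplingParams(temperature=0.95, max_tokens=1024)

# Render the chat messages into a single prompt string, then generate.
inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)
print(outputs[0].outputs[0].text)
```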