yujiepan committed on
Commit e972678
1 Parent(s): 335e82e

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "THUDM/chatglm3-6b-128k",
+   "add_bias_linear": false,
+   "add_qkv_bias": true,
+   "apply_query_key_layer_scaling": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "ChatGLMForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "auto_map": {
+     "AutoConfig": "THUDM/chatglm3-6b-128k--configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "THUDM/chatglm3-6b-128k--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForCausalLM": "THUDM/chatglm3-6b-128k--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "THUDM/chatglm3-6b-128k--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSequenceClassification": "THUDM/chatglm3-6b-128k--modeling_chatglm.ChatGLMForSequenceClassification"
+   },
+   "bias_dropout_fusion": true,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "ffn_hidden_size": 6,
+   "fp32_residual_connection": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 4,
+   "kv_channels": 2,
+   "layernorm_epsilon": 1e-05,
+   "model_type": "chatglm",
+   "multi_query_attention": true,
+   "multi_query_group_num": 2,
+   "num_attention_heads": 4,
+   "num_layers": 2,
+   "original_rope": true,
+   "pad_token_id": 0,
+   "padded_vocab_size": 65024,
+   "post_layer_norm": true,
+   "pre_seq_len": null,
+   "prefix_projection": false,
+   "quantization_bit": 0,
+   "rmsnorm": true,
+   "rope_ratio": 500,
+   "seq_length": 131072,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.2",
+   "use_cache": true,
+   "vocab_size": 65024
+ }
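The debug-sized dimensions above (hidden_size 4, num_layers 2, ffn_hidden_size 6) suggest a tiny test variant of ChatGLM3-6B-128K rather than the full model. A minimal loading sketch follows; the repository id is an assumption (the actual repo of this commit is not shown), and `trust_remote_code=True` is needed because the `auto_map` entries point at remote code:

```python
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "yujiepan/REPO_NAME"  # placeholder: substitute the repo this commit belongs to

# The config resolves through the auto_map to THUDM's configuration_chatglm.py.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.hidden_size, config.num_layers)  # 4 2 -- tiny, debug-sized dims

model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
```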
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.37.2"
+ }
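These values are the defaults that `model.generate()` falls back to when no overrides are passed. A short sketch, reusing the assumed `repo_id` from above:

```python
from transformers import GenerationConfig

# Loads generation_config.json from the repo; generate() reads these fields by default.
gen_cfg = GenerationConfig.from_pretrained(repo_id)
print(gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 2 0
```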
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35ee3257029d96226bfaba147dead567a7b49f60f43fc38875dfbe1358b9a24c
+ size 2084508
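The file is stored as a Git LFS pointer: the repo records only the spec version, a SHA-256 object id, and the byte size (about 2 MB here, consistent with the tiny config). A minimal sketch for verifying a downloaded copy against the recorded oid; the local path is an assumption:

```python
import hashlib

expected = "35ee3257029d96226bfaba147dead567a7b49f60f43fc38875dfbe1358b9a24c"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # hypothetical local path after download
    for chunk in iter(lambda: f.read(8192), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
```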
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {}
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
+ size 1018370
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "THUDM/chatglm3-6b-128k--tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   },
+   "chat_template": "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|> \n {{ message['content'] }}{% else %}<|{{ message['role'] }}|> \n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "encode_special_tokens": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "remove_space": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "unk_token": "<unk>"
+ }
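The `chat_template` above prepends `[gMASK]sop` to the first message, wraps each role in `<|role|>` markers, and appends `<|assistant|>` when a generation prompt is requested. A minimal sketch of rendering it, again assuming the placeholder `repo_id` from the first snippet:

```python
from transformers import AutoTokenizer

# ChatGLMTokenizer is pulled in via the auto_map's remote code.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)

messages = [{"role": "user", "content": "Hello"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # "[gMASK]sop<|user|> \n Hello<|assistant|>"
```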