danielhanchen committed
Commit ab94e2f · verified · 1 Parent(s): 601b161

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ BF16/QwQ-32B.BF16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ QwQ-32B-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ BF16/QwQ-32B.BF16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
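The nine new entries route each uploaded GGUF through Git LFS, alongside the existing archive patterns. Below is a minimal Python sketch (not Git's real attribute matcher, which has extra anchoring and '**' rules; the pattern list is abbreviated) of how these entries decide which paths become LFS pointers:

from fnmatch import fnmatch

# Patterns taken from the .gitattributes hunk above (abbreviated).
lfs_patterns = [
    "*.zip", "*.zst", "*tfevents*",
    "QwQ-32B-Q2_K.gguf",
    "QwQ-32B.Q8_0.gguf",
    "BF16/QwQ-32B.BF16-00001-of-00002.gguf",
]

def uses_lfs(path: str) -> bool:
    # Match against the full path and the basename, roughly mirroring
    # how unanchored .gitattributes patterns apply at any depth.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(path, p) or fnmatch(name, p) for p in lfs_patterns)

assert uses_lfs("QwQ-32B-Q2_K.gguf")
assert uses_lfs("BF16/QwQ-32B.BF16-00001-of-00002.gguf")
assert not uses_lfs("config.json")   # stays a regular Git blob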
BF16/QwQ-32B.BF16-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9776ec62e5fdd9f30e3331af3d6f45cbe0d0412697250b01b1966a2e28431a4a
+ size 49916375328
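Each ADDED file in this commit is stored as a Git LFS pointer like the one above: a version line, the blob's sha256 oid, and its byte size. A small Python sketch (hypothetical helper names; assumes the pointer and the downloaded blob are both local files) of how such a pointer can be parsed and a download verified against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # Pointer files are 'key value' lines: version, oid, size.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # Compare the downloaded blob's size and sha256 with the pointer's fields.
    meta = parse_lfs_pointer(Path(pointer_path).read_text())
    if Path(blob_path).stat().st_size != int(meta["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return "sha256:" + digest.hexdigest() == meta["oid"]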
BF16/QwQ-32B.BF16-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56eb9a39eabb18b0d1a4bd66887f475bd914b0f0d31d5aa6a269cb48cddd24d5
+ size 15619594368
QwQ-32B-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abb3e6ab4cfd316ce2470f2eecdbee30bdc4fcff8e77d5151565c8aac1e7399d
+ size 12313098432
QwQ-32B-Q2_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9758aa98d50b7609e3b973a52676dbae58c815abc0ece7e3a28265be51f9131a
+ size 12495575232
QwQ-32B-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8803191bbabcc23ff0dabdc4056b8bc5e00fb0816cd0ac8b7c7a04fd3381e192
+ size 15935047872
QwQ-32B-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a57a952d3d7469581ff623863992b1baadff060aa9a88cba092206f92efadfb
+ size 19851335872
QwQ-32B-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86b921a7c81dcca1f8adafa4f05b1ebdf43e1fe4cd7309f554107a82d63ced81
+ size 23262156992
QwQ-32B-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:052d192729c2557ec95cfe4a5db7c5c2b68970a2585e0be41201fb29543b8e87
+ size 26886154432
QwQ-32B.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:619c98e932380bb9f5b7860cda119a6514c67d30432288b368515dda43876f48
+ size 34820884672
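The pointer sizes alone allow a sanity check of the quantization levels. The two BF16 shards total 65,535,969,696 bytes, which at 2 bytes per BF16 weight implies roughly 32.8B parameters; dividing each quant's size by that count gives its effective bits per weight. A back-of-envelope Python sketch (it ignores GGUF metadata overhead, so the figures are approximate):

# Sizes copied from the LFS pointers in this commit (bytes).
bf16_bytes = 49_916_375_328 + 15_619_594_368   # two BF16 shards
n_params = bf16_bytes / 2                      # BF16 = 2 bytes/weight -> ~32.8B

quants = {
    "Q2_K":   12_313_098_432,
    "Q2_K_L": 12_495_575_232,
    "Q3_K_M": 15_935_047_872,
    "Q4_K_M": 19_851_335_872,
    "Q5_K_M": 23_262_156_992,
    "Q6_K":   26_886_154_432,
    "Q8_0":   34_820_884_672,
}
for name, size in quants.items():
    print(f"{name}: {size * 8 / n_params:.2f} bits/weight")
# -> Q2_K ~3.01, Q4_K_M ~4.85, Q8_0 ~8.50; slightly above the nominal
#    widths because k-quants keep scales and some tensors at higher precision.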
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "Qwen/QwQ-32B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27648,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 64,
+   "model_type": "qwen2",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 64,
+   "num_key_value_heads": 8,
+   "pad_token_id": 151654,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "unsloth_fixed": true,
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
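The config pins down the attention geometry: 40 query heads over a 5120-wide hidden state give 128-dimensional heads, and 8 key/value heads mean a 5-to-1 GQA grouping. A quick derivation (values copied from config.json above, assuming the standard Qwen2 convention head_dim = hidden_size / num_attention_heads) of the per-token KV-cache cost in BF16:

# Values copied from config.json above.
hidden_size         = 5120
num_attention_heads = 40
num_key_value_heads = 8
num_hidden_layers   = 64
max_positions       = 131072

head_dim  = hidden_size // num_attention_heads          # 128
gqa_group = num_attention_heads // num_key_value_heads  # 5 query heads per KV head

# BF16 KV cache per token: K and V (factor 2), per layer, per KV head, 2 bytes each.
kv_bytes_per_token = 2 * num_hidden_layers * num_key_value_heads * head_dim * 2
print(kv_bytes_per_token)                          # 262144 bytes = 256 KiB per token
print(kv_bytes_per_token * max_positions / 2**30)  # ~32 GiB at the full 131072 context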