Upload folder using huggingface_hub
#16 opened by sharpenb

Files changed:
- config.json (+1 -1)
- model.safetensors (+1 -1)
- smash_config.json (+1 -1)
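A commit like this is normally produced with huggingface_hub's `upload_folder` API, which pushes every file in a local directory to a repo as a single commit. A minimal sketch of the call; the folder path and repo id below are placeholders, not taken from this PR:

```python
from huggingface_hub import HfApi

api = HfApi()

# Upload the contents of a local folder to a model repo in one commit.
# folder_path and repo_id are placeholders, not taken from this PR.
api.upload_folder(
    folder_path="./smashed-model",
    repo_id="user/smashed-model",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```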
config.json CHANGED

```diff
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/
+  "_name_or_path": "/covalent/.cache/models/tmpr3k97cwnemb4rm9f",
   "activation_function": "gelu_new",
   "architectures": [
     "GPTOptim"
```
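The `_name_or_path` value changed here is metadata that transformers writes into config.json at save time; it records the local temporary path the model was saved from and has no effect when the repo is loaded from the Hub. A small sketch, assuming a placeholder repo id:

```python
from transformers import AutoConfig

# Placeholder repo id, not taken from this PR.
config = AutoConfig.from_pretrained("user/smashed-model")

# from_pretrained replaces _name_or_path with the identifier it was given,
# so the temp cache path stored in config.json is save-time metadata only.
print(config._name_or_path)   # "user/smashed-model"
print(config.architectures)   # ["GPTOptim"]
```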
model.safetensors CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1a265c4de60ece0c1cfbda682963562bb28ee56467830352f6f80bb1536fbc1e
 size 1207575528
```
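model.safetensors is tracked with Git LFS, so the repo stores only this pointer file; the `oid` is the SHA-256 digest of the actual 1,207,575,528-byte weights file. A downloaded copy can be checked against it; a minimal sketch, assuming the file sits in the current directory:

```python
import hashlib

EXPECTED = "1a265c4de60ece0c1cfbda682963562bb28ee56467830352f6f80bb1536fbc1e"

# Hash the weights in 1 MiB chunks so the ~1.2 GB file is never
# loaded into memory all at once. The local path is an assumption.
digest = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "checksum mismatch"
```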
smash_config.json CHANGED

```diff
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/
+  "cache_dir": "/covalent/.cache/models/tmpr3k97cwn",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {
```
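Per this config, the model carries 8-bit quantized weights (`"quant_llm-int8_weight_bits": 8`) and was saved through bitsandbytes (`"save_load_fn": "bitsandbytes"`). In plain transformers, a checkpoint like this is typically loaded with an 8-bit BitsAndBytesConfig; a minimal sketch with a placeholder repo id (the exact load path used by the smash tooling may differ):

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Placeholder repo id, not taken from this PR. This shows the generic
# transformers route for 8-bit bitsandbytes weights, not the smash loader.
model = AutoModelForCausalLM.from_pretrained(
    "user/smashed-model",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
```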