{
  "auto_mapping": null,
  "base_model_name_or_path": "abeja/gpt-neox-japanese-2.7b",
  "inference_mode": true,
  "num_attention_heads": 32,
  "num_layers": 32,
  "num_transformer_submodules": 1,
  "num_virtual_tokens": 16,
  "peft_type": "PROMPT_TUNING",
  "prompt_tuning_init": "TEXT",
  "prompt_tuning_init_text": "レビュー文がネガティブかポジティブかについて1〜5で判定してください。1に近いほどネガティブで、5に近いほどポジティブです：",
  "revision": null,
  "task_type": "CAUSAL_LM",
  "token_dim": 2560,
  "tokenizer_name_or_path": "abeja/gpt-neox-japanese-2.7b"
}
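
The `prompt_tuning_init_text` is Japanese for: "Rate whether the review text is negative or positive on a scale of 1 to 5. The closer to 1, the more negative; the closer to 5, the more positive:". Below is a minimal sketch of how this config maps to code, assuming the `peft` and `transformers` libraries are installed; the variable names are illustrative, not part of the saved adapter.

```python
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, PromptTuningInit, get_peft_model

base_model = "abeja/gpt-neox-japanese-2.7b"

# Mirrors the JSON above: a causal-LM prompt-tuning adapter whose 16 virtual
# tokens are initialized from the Japanese instruction text.
peft_config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    prompt_tuning_init=PromptTuningInit.TEXT,
    num_virtual_tokens=16,
    prompt_tuning_init_text=(
        "レビュー文がネガティブかポジティブかについて1〜5で判定してください。"
        "1に近いほどネガティブで、5に近いほどポジティブです："
    ),
    tokenizer_name_or_path=base_model,
)

model = AutoModelForCausalLM.from_pretrained(base_model)
model = get_peft_model(model, peft_config)
# Only the virtual-token embeddings train: 16 tokens x 2560 dims = 40,960 params;
# token_dim, num_layers, and num_attention_heads are inferred from the base model.
model.print_trainable_parameters()
```

Since `inference_mode` is `true`, this file describes an already-trained adapter; for inference one would instead load it with `PeftModel.from_pretrained(base, adapter_dir)`, where `adapter_dir` (a hypothetical path) is the directory containing this config and its weights.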