{ "_name_or_path": "hf-internal-testing/tiny-random-GPTJForCausalLM", "activation_function": "gelu_new", "architectures": [ "GPTJForCausalLM" ], "attention_probs_dropout_prob": 0.0, "attn_pdrop": 0.0, "bos_token_id": 0, "embd_pdrop": 0.0, "eos_token_id": 0, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "initializer_range": 0.02, "intermediate_size": 37, "is_decoder": true, "layer_norm_epsilon": 1e-05, "model_type": "gptj", "n_embd": 32, "n_head": 4, "n_inner": null, "n_layer": 5, "n_positions": 512, "pad_token_id": 1023, "resid_pdrop": 0.0, "rotary_dim": 4, "scale_attn_weights": true, "tie_word_embeddings": false, "torch_dtype": "float32", "transformers_version": "4.25.1", "type_vocab_size": 16, "use_cache": true, "vocab_size": 1024 }