{ "_name_or_path": "/workspace/process/lightonai_alfred-40b-1023/source", "alibi": false, "apply_residual_connection_post_layernorm": false, "architectures": [ "RWForCausalLM" ], "attention_dropout": 0.0, "auto_map": { "AutoConfig": "configuration_RW.RWConfig", "AutoModelForCausalLM": "modeling_RW.RWForCausalLM" }, "bias": false, "bos_token_id": 11, "embedding_scaling_factor": 4, "eos_token_id": 11, "hidden_dropout": 0.0, "hidden_size": 8192, "initializer_range": 0.02, "layer_norm_epsilon": 1e-05, "model_type": "RefinedWeb", "multi_query": true, "n_head": 128, "n_head_kv": 8, "n_layer": 60, "ntk_scaling_factor": 5, "pad_token_id": 0, "parallel_attn": true, "pretraining_tp": 1, "single_ln": false, "torch_dtype": "bfloat16", "transformers_version": "4.35.0", "use_cache": true, "vanilla_scaling_factor": null, "vocab_size": 65024, "quantization_config": { "bits": 4, "group_size": 128, "damp_percent": 0.1, "desc_act": true, "sym": true, "true_sequential": true, "model_name_or_path": null, "model_file_base_name": "model", "quant_method": "gptq" } }