{ "config": { "architecture": "union", "configs": [ { "architecture": "prefix_tuning", "bottleneck_size": 512, "cross_prefix": true, "dropout": 0.0, "encoder_prefix": true, "flat": false, "leave_out": [], "non_linearity": "tanh", "prefix_length": 10, "shared_gating": true, "use_gating": true }, { "adapter_residual_before_ln": false, "cross_adapter": false, "factorized_phm_W": true, "factorized_phm_rule": false, "hypercomplex_nonlinearity": "glorot-uniform", "init_weights": "bert", "inv_adapter": null, "inv_adapter_reduction_factor": null, "is_parallel": false, "learn_phm": true, "leave_out": [], "ln_after": false, "ln_before": false, "mh_adapter": false, "non_linearity": "relu", "original_ln_after": true, "original_ln_before": true, "output_adapter": true, "phm_bias": true, "phm_c_init": "normal", "phm_dim": 4, "phm_init_range": 0.0001, "phm_layer": false, "phm_rank": 1, "reduction_factor": 16, "residual_before_ln": true, "scaling": 1.0, "shared_W_phm": false, "shared_phm_rule": true, "use_gating": true }, { "alpha": 8, "architecture": "lora", "attn_matrices": [ "q", "v" ], "composition_mode": "add", "dropout": 0.0, "init_weights": "lora", "intermediate_lora": false, "output_lora": false, "r": 8, "selfattn_lora": true, "use_gating": true } ] }, "hidden_size": 768, "model_class": "BertModelWithHeads", "model_name": "bert-base-uncased", "model_type": "bert", "name": "difraud-fake_news", "version": "3.2.1" }