---
# Model configuration (jsonargparse / LightningCLI style: each component is
# instantiated from `class_path` with keyword arguments from `init_args`).
model:
  class_path: model.lina.Lina
  init_args:
    # Codebook / vocabulary sizes.
    n_codebook: 1024
    n_special_token_in: 3
    n_special_token_out: 3
    n_txt_vocab: 180
    # Embedding widths: d_context is the text-encoder context size fed to the
    # decoder; d_model is the main model width.
    d_context: 384
    d_model: 512
    # Indices of the quantizer layers consumed by the model.
    # NOTE(review): assumed to be codebook-level indices — confirm against
    # model.lina.Lina's constructor.
    quant_layer: [0, 1, 2, 3]
    # Text (context) encoder sub-module.
    txt_encoder:
      class_path: model.encoder.TextEncoder
      init_args:
        dim: 512
        heads: 1
        n_layers: 6
        dropout: 0.1
    # Attentive recurrent decoder backbone (RWKV-v6 variant).
    attentive_rnn:
      class_path: model.rwkv6x.AttentiveRWKV6
      init_args:
        d_model: 512
        d_context: 512
        heads: 1
        dropout_att: 0.2
        n_layer: 3
        # Canonical lowercase boolean (was `True`; yamllint `truthy`).
        blind: true