# Identifiers for the two supported model sources / loading backends.
STRING_MASS_MODELSCOPE = "modelscope"
STRING_MASS_TRANSFORMERS = "transformers"
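
# Illustrative only: a minimal sketch of how the "mass" switch might select the loading
# backend. load_backbone is a hypothetical helper, not part of this project; the
# transformers branch uses BertModel.from_pretrained, and the ModelScope branch assumes
# modelscope.models.Model.from_pretrained is available.
def load_backbone(mass: str, model_dir: str):
    if mass == STRING_MASS_TRANSFORMERS:
        from transformers import BertModel
        return BertModel.from_pretrained(model_dir)
    if mass == STRING_MASS_MODELSCOPE:
        from modelscope.models import Model  # assumed ModelScope API
        return Model.from_pretrained(model_dir)
    raise ValueError(f"unknown model source: {mass}")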

Config = {
    # The ModelScope StructBERT backbone appears to be used the same way as BERT:
    # "model_dir":"D:/aiproject/modelscope_model/iic/nlp_structbert_backbone_base_std",
    "model_dir": "D:/aiproject/bert-base-chinese",
    "model_type": "lstm",  # lstm, sbert, bert
    "mass": STRING_MASS_MODELSCOPE,

    # "train_data_path": "D:/dataset/大众点评评价数据/dev.csv",
    "train_data_path": "D:/dataset/新闻/train_tag_news.csv",
    "max_length": 100,
    "num_class": 18,
    "classify_hidden_size": 768,

    "epoch_num": 10,
    "batch_size": 20,
    "learning_rate": 1e-2,
    "optimizer": "adam",

    "peft":{
        "user_peft":False,
        "peft_type": "lora",
        "num_virtual_tokens": 10,
        # 下面是lora 配置
        "r": 8,
        "lora_alpha": 32,
        "lora_dropout": 0.1,
        "target_modules": ['query', 'key', 'value']
    },
    "model_path_save": "model_out",
    "only_save_last": 1,

    "lstm": {
        "hidden_size": 768,
        "rnn_num_layers": 1
    },
    "embedding": {
        "vocab_size": 21128,
        "embedding_dim": 768,
        "pad_token_id": 0,
        "layer_norm_eps": 1e-12,
        "hidden_dropout_prob": 0.1
    }
}
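
# Illustrative only: a minimal sketch of how the "peft" section above could be mapped onto
# the Hugging Face peft library when peft_type is "lora". wrap_with_lora is a hypothetical
# helper, not part of this project, and assumes a transformers-style backbone.
def wrap_with_lora(backbone, peft_cfg: dict):
    from peft import LoraConfig, get_peft_model
    lora_config = LoraConfig(
        r=peft_cfg["r"],
        lora_alpha=peft_cfg["lora_alpha"],
        lora_dropout=peft_cfg["lora_dropout"],
        target_modules=peft_cfg["target_modules"],
    )
    return get_peft_model(backbone, lora_config)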

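# Illustrative only: a minimal sketch of an LSTM classifier built from the "embedding",
# "lstm", and "num_class" entries above (the model_type == "lstm" case). LstmClassifier
# is a hypothetical name and does not correspond to code elsewhere in this project.
from torch import nn

class LstmClassifier(nn.Module):
    def __init__(self, cfg: dict):
        super().__init__()
        emb = cfg["embedding"]
        self.embedding = nn.Embedding(
            emb["vocab_size"], emb["embedding_dim"], padding_idx=emb["pad_token_id"]
        )
        self.layer_norm = nn.LayerNorm(emb["embedding_dim"], eps=emb["layer_norm_eps"])
        self.dropout = nn.Dropout(emb["hidden_dropout_prob"])
        self.lstm = nn.LSTM(
            emb["embedding_dim"],
            cfg["lstm"]["hidden_size"],
            num_layers=cfg["lstm"]["rnn_num_layers"],
            batch_first=True,
        )
        self.classifier = nn.Linear(cfg["lstm"]["hidden_size"], cfg["num_class"])

    def forward(self, input_ids):
        x = self.dropout(self.layer_norm(self.embedding(input_ids)))
        output, _ = self.lstm(x)                   # (batch, seq_len, hidden_size)
        return self.classifier(output[:, -1, :])   # logits from the last time step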