| import os | |
| from typing import Dict, List, Optional, Union | |
| from transformers.configuration_utils import PretrainedConfig | |
| from transformers.utils import logging | |
# Module-level logger using the transformers logging helper.
# NOTE(review): not referenced in the visible portion of this file — presumably
# used by code elsewhere or kept for convention; confirm before removing.
logger = logging.get_logger(__name__)
class PDeepPPConfig(PretrainedConfig):
    """Configuration class for the PDeepPP model.

    Holds the architectural hyperparameters of a PDeepPP model and inherits
    from :class:`~transformers.PretrainedConfig`, so instances participate in
    the standard Hugging Face ``save_pretrained`` / ``from_pretrained``
    serialization flow.

    Args:
        input_size: Dimensionality of the input embeddings (default 1280).
        output_size: Dimensionality of the model output (default 128).
        num_heads: Number of attention heads (default 8).
        hidden_size: Hidden dimension of the transformer layers (default 256).
        num_transformer_layers: Number of stacked transformer layers (default 4).
        dropout: Dropout probability (default 0.3).
        task_type: Left empty by default; filled in dynamically by the
            conversion script.
        esm_ratio: Left as ``None`` by default; filled in dynamically by the
            conversion script.
        lambda_: Left as ``None`` by default; filled in dynamically by the
            conversion script.
        **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``id2label``).
    """

    model_type = "PDeepPP"

    def __init__(
        self,
        input_size=1280,
        output_size=128,
        num_heads=8,
        hidden_size=256,
        num_transformer_layers=4,
        dropout=0.3,
        task_type="",    # left empty; the convert script supplies it dynamically
        esm_ratio=None,  # left empty; the convert script supplies it dynamically
        lambda_=None,    # left empty; the convert script supplies it dynamically
        **kwargs,
    ):
        # Let the base class consume the generic HF configuration options first.
        super().__init__(**kwargs)

        # Architectural hyperparameters.
        self.input_size = input_size
        self.output_size = output_size
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.num_transformer_layers = num_transformer_layers
        self.dropout = dropout

        # Task-specific fields, empty by default (populated at conversion time).
        self.task_type = task_type
        self.esm_ratio = esm_ratio
        self.lambda_ = lambda_
# Register this config with the transformers Auto classes so that
# AutoConfig.from_pretrained(..., trust_remote_code=True) can resolve it via
# the auto_map entry written into the saved config.json.
PDeepPPConfig.register_for_auto_class()