{
  "model_type": "bert",  # Change this based on your model type (e.g., gpt2, roberta, etc.)
  "num_labels": 2,  # Number of output labels for classification (adjust for your task)
  "hidden_size": 768,  # Hidden layer size (depends on your model)
  "intermediate_size": 3072,  # Intermediate size for feed-forward layers
  "max_position_embeddings": 512,  # Max token length
  "num_attention_heads": 12,  # Number of attention heads
  "num_hidden_layers": 12,  # Number of hidden layers in your transformer model
  "vocab_size": 30522,  # Size of tokenizer vocabulary
  "hidden_act": "gelu",  # Activation function in hidden layers
  "initializer_range": 0.02,  # Initialization range for weights
  "layer_norm_eps": 1e-12,  # Layer normalization epsilon
  "pad_token_id": 0,  # Padding token ID (usually 0)
  "type_vocab_size": 2,  # Type vocab size (typically 2 for sentence pairs)
  "attention_probs_dropout_prob": 0.1,  # Dropout probability for attention layers
  "hidden_dropout_prob": 0.1,  # Dropout probability for hidden layers
  "use_cache": true,  # Whether to cache past keys/values
  "model_version": "1.0",  # Your model version
  "tokenizer_class": "BertTokenizer",  # Tokenizer class (adjust for your model type)
  "classifier_dropout": null,  # Optional dropout for classification head
  "architectures": [
    "BertForSequenceClassification"  # Model architecture type
  ]
}
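
Standard JSON does not allow comments, so the annotations above would have to be stripped before this could be parsed as a real config.json. A minimal sketch of an alternative, assuming the Hugging Face transformers library is installed: build the same configuration programmatically with BertConfig and let save_pretrained write a clean, comment-free config.json (the output directory ./my-model is only a hypothetical example).

from transformers import BertConfig, BertForSequenceClassification

# Mirror the annotated template above (BERT-base sizes, 2 classification labels).
config = BertConfig(
    num_labels=2,
    hidden_size=768,
    intermediate_size=3072,
    max_position_embeddings=512,
    num_attention_heads=12,
    num_hidden_layers=12,
    vocab_size=30522,
    hidden_act="gelu",
    initializer_range=0.02,
    layer_norm_eps=1e-12,
    pad_token_id=0,
    type_vocab_size=2,
    attention_probs_dropout_prob=0.1,
    hidden_dropout_prob=0.1,
    use_cache=True,
    classifier_dropout=None,
)

# Randomly initialized classification model built from this config;
# save_pretrained writes config.json (without comments) next to the weights.
model = BertForSequenceClassification(config)
model.save_pretrained("./my-model")  # hypothetical output directory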