Erfan11 committed on
Commit
0a835b0
1 Parent(s): 7c2e0a4

Update config.json

Files changed (1)
  config.json (+20 -20)
config.json CHANGED
@@ -1,24 +1,24 @@
  {
- "model_type": "bert", # Change this based on your model type (e.g., gpt2, roberta, etc.)
- "num_labels": 2, # Number of output labels for classification (adjust for your task)
- "hidden_size": 768, # Hidden layer size (depends on your model)
- "intermediate_size": 3072, # Intermediate size for feed-forward layers
- "max_position_embeddings": 512, # Max token length
- "num_attention_heads": 12, # Number of attention heads
- "num_hidden_layers": 12, # Number of hidden layers in your transformer model
- "vocab_size": 30522, # Size of tokenizer vocabulary
- "hidden_act": "gelu", # Activation function in hidden layers
- "initializer_range": 0.02, # Initialization range for weights
- "layer_norm_eps": 1e-12, # Layer normalization epsilon
- "pad_token_id": 0, # Padding token ID (usually 0)
- "type_vocab_size": 2, # Type vocab size (typically 2 for sentence pairs)
- "attention_probs_dropout_prob": 0.1, # Dropout probability for attention layers
- "hidden_dropout_prob": 0.1, # Dropout probability for hidden layers
- "use_cache": true, # Whether to cache past keys/values
- "model_version": "1.0", # Your model version
- "tokenizer_class": "BertTokenizer", # Tokenizer class (adjust for your model type)
- "classifier_dropout": null, # Optional dropout for classification head
  "architectures": [
- "BertForSequenceClassification" # Model architecture type
  ]
  }
 
  {
+ "model_type": "bert",
+ "num_labels": 2,
+ "hidden_size": 768,
+ "intermediate_size": 3072,
+ "max_position_embeddings": 512,
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "vocab_size": 30522,
+ "hidden_act": "gelu",
+ "initializer_range": 0.02,
+ "layer_norm_eps": 1e-12,
+ "pad_token_id": 0,
+ "type_vocab_size": 2,
+ "attention_probs_dropout_prob": 0.1,
+ "hidden_dropout_prob": 0.1,
+ "use_cache": true,
+ "model_version": "1.0",
+ "tokenizer_class": "BertTokenizer",
+ "classifier_dropout": null,
  "architectures": [
+ "BertForSequenceClassification"
  ]
  }
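
The removed lines annotated each field with "#" comments, but JSON has no comment syntax, so that version would not parse with json.load or the transformers config loaders; the added lines are plain, valid JSON. A minimal check of the cleaned file, assuming a local copy named config.json (the path and the printed fields are illustrative, not part of this commit):

# Minimal sketch, assuming a local copy of the cleaned config.json.
import json

from transformers import BertConfig

# Plain JSON parse: the previous, comment-laden version would raise json.JSONDecodeError here.
with open("config.json") as f:
    cfg = json.load(f)

# Load the same file into a transformers config object.
config = BertConfig.from_json_file("config.json")
print(config.model_type, cfg["num_labels"], config.hidden_size)  # bert 2 768

If the per-field notes are still wanted, the model card (README.md) is the usual place for them, since config.json has to stay machine-readable.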