Upload SimpleMLPForClassification

Files changed:
- config.json (+4 -0)
- model.safetensors (+1 -1)
- modeling_simple_mlp.py (+43 -0)
- simple_mlp_configuration.py (+18 -0)
config.json
CHANGED

@@ -2,6 +2,10 @@
   "architectures": [
     "SimpleMLPForClassification"
   ],
+  "auto_map": {
+    "AutoConfig": "simple_mlp_configuration.SimpleMLPConfig",
+    "AutoModelForSequenceClassification": "modeling_simple_mlp.SimpleMLPForClassification"
+  },
   "dropout_rate": 0.1,
   "dtype": "float32",
   "hidden_dim": 64,
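The new "auto_map" block is what lets transformers resolve these custom classes from the Python files in the repo itself. A minimal loading sketch (the repo id "user/simple-mlp" is a placeholder, not taken from this commit):

import torch
from transformers import AutoConfig, AutoModelForSequenceClassification

# trust_remote_code=True is required because the config and model classes
# live in this repo's Python files, not in the transformers library.
config = AutoConfig.from_pretrained("user/simple-mlp", trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    "user/simple-mlp", trust_remote_code=True
)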
model.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:85bf988b2688164019bb0a0766f9cba665d10e18d2dc816439d1cfc26c90b534
 size 34124
modeling_simple_mlp.py
ADDED

@@ -0,0 +1,43 @@
+import torch
+import torch.nn as nn
+from transformers import PreTrainedModel
+from simple_mlp_configuration import SimpleMLPConfig
+from transformers.modeling_outputs import SequenceClassifierOutput
+
+class SimpleMLPForClassification(PreTrainedModel):
+    config_class = SimpleMLPConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.config = config
+        self.num_labels = config.num_classes
+
+        self.fc1 = nn.Linear(config.input_dim, config.hidden_dim)
+        self.activation = nn.ReLU()
+        self.dropout = nn.Dropout(config.dropout_rate)
+        self.fc2 = nn.Linear(config.hidden_dim, config.num_classes)
+
+        self.post_init()
+
+    def forward(self, inputs_embeds, labels=None, return_dict=None):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        x = self.fc1(inputs_embeds)
+        x = self.activation(x)
+        x = self.dropout(x)
+        logits = self.fc2(x)
+
+        loss = None
+        if labels is not None:
+            loss_fct = nn.CrossEntropyLoss()
+            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,)
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutput(
+            loss=loss,
+            logits=logits,
+        )
+
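Since the model consumes pre-computed feature vectors via inputs_embeds rather than token ids, a local smoke test only needs a random tensor. A minimal sketch, assuming both uploaded files sit next to the script (hidden_dim=64 matches the committed config.json rather than the class default):

import torch
from simple_mlp_configuration import SimpleMLPConfig
from modeling_simple_mlp import SimpleMLPForClassification

config = SimpleMLPConfig(input_dim=768, hidden_dim=64, num_classes=2)
model = SimpleMLPForClassification(config)
model.eval()  # disable dropout for a deterministic check

inputs_embeds = torch.randn(4, 768)   # batch of 4 feature vectors
labels = torch.tensor([0, 1, 1, 0])

with torch.no_grad():
    out = model(inputs_embeds=inputs_embeds, labels=labels)

print(out.logits.shape)  # torch.Size([4, 2])
print(out.loss)          # scalar cross-entropy loss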
simple_mlp_configuration.py
ADDED

@@ -0,0 +1,18 @@
+from transformers import PretrainedConfig
+
+class SimpleMLPConfig(PretrainedConfig):
+    model_type = "simple_mlp"
+
+    def __init__(
+        self,
+        input_dim=768,
+        hidden_dim=256,
+        num_classes=2,
+        dropout_rate=0.1,
+        **kwargs
+    ):
+        self.input_dim = input_dim
+        self.hidden_dim = hidden_dim
+        self.num_classes = num_classes
+        self.dropout_rate = dropout_rate
+        super().__init__(**kwargs)
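For context, a commit shaped like this one (the two source files plus an auto_map entry in config.json) is what transformers' custom-code workflow produces when the classes are registered for auto-class support before pushing. A sketch of that workflow, with a placeholder repo id:

from simple_mlp_configuration import SimpleMLPConfig
from modeling_simple_mlp import SimpleMLPForClassification

# Tag the classes so push_to_hub copies their source files into the repo
# and writes the matching auto_map entry into config.json.
SimpleMLPConfig.register_for_auto_class()
SimpleMLPForClassification.register_for_auto_class("AutoModelForSequenceClassification")

model = SimpleMLPForClassification(SimpleMLPConfig(hidden_dim=64))
model.push_to_hub("user/simple-mlp")  # hypothetical repo id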