dacorvo (HF staff) committed
Commit e8adde1
1 Parent(s): 36bc522

Upload folder using huggingface_hub

Files changed (5)
  1. README.md +29 -0
  2. config.json +15 -0
  3. configuration_mlp.py +20 -0
  4. modeling_mlp.py +22 -0
  5. pytorch_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,29 @@
+ ---
+ license: apache-2.0
+ pipeline_tag: image-classification
+ tags:
+ - pretrained
+ ---
+
+ # Model Card for MNIST-MLP
+
+ This is a simple MLP trained on the MNIST dataset.
+
+ Its primary purpose is to serve as a very simple reference model for testing quantization.
+
+ ## Input preprocessing
+
+ The MNIST images must be normalized and flattened as follows:
+
+ ```python
+ import torch
+ from torchvision import datasets, transforms
+
+ transform = transforms.Compose([
+     transforms.ToTensor(),
+     transforms.Normalize((0.1307,), (0.3081,)),
+     transforms.Lambda(lambda x: torch.flatten(x)),
+ ])
+ test_set = datasets.MNIST('../data', train=False, download=True,
+                           transform=transform)
+ ```
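To go with the preprocessing above, here is a minimal inference sketch. The repo id `dacorvo/mnist-mlp` is a placeholder for this repository's actual path, and `trust_remote_code=True` is assumed to be acceptable since the `MLP` class ships with the repo rather than with transformers:

```python
import torch
from transformers import AutoModel
from torchvision import datasets, transforms

# Placeholder repo id; substitute the actual repository path.
model = AutoModel.from_pretrained("dacorvo/mnist-mlp", trust_remote_code=True)
model.eval()

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
    transforms.Lambda(lambda x: torch.flatten(x)),
])
test_set = datasets.MNIST('../data', train=False, download=True,
                          transform=transform)

image, label = test_set[0]               # one flattened, normalized digit
with torch.no_grad():
    probs = model(image.unsqueeze(0))    # shape (1, 10): softmax probabilities
print(f"predicted: {probs.argmax(-1).item()}, expected: {label}")
```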
config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "architectures": [
+     "MLP"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_mlp.MLPConfig",
+     "AutoModel": "modeling_mlp.MLP"
+   },
+   "hidden_size": 256,
+   "input_size": 784,
+   "model_type": "mlp",
+   "output_size": 10,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
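The `auto_map` block is what lets the generic `Auto*` classes resolve to the custom code shipped in this repository. A minimal sketch of that resolution (same placeholder repo id as above; `trust_remote_code=True` is required because the classes are not part of the transformers library itself):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("dacorvo/mnist-mlp", trust_remote_code=True)
print(type(config).__name__)                                      # MLPConfig
print(config.input_size, config.hidden_size, config.output_size)  # 784 256 10
```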
configuration_mlp.py ADDED
@@ -0,0 +1,20 @@
+ from transformers import PretrainedConfig
+
+
+ class MLPConfig(PretrainedConfig):
+     model_type = "mlp"
+
+     def __init__(
+         self,
+         input_size: int = 784,    # flattened 28x28 MNIST image
+         output_size: int = 10,    # one class per digit
+         hidden_size: int = 256,
+         **kwargs,
+     ):
+         self.input_size = input_size
+         self.output_size = output_size
+         self.hidden_size = hidden_size
+         super().__init__(**kwargs)
+
+
+ # Register so that AutoConfig resolves to this class (recorded in auto_map).
+ MLPConfig.register_for_auto_class()
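The `register_for_auto_class()` call at the bottom is what populates the `AutoConfig` entry of `auto_map` when the config is saved. A small sketch of that round trip, assuming `MLPConfig` is importable from `configuration_mlp.py` (the output directory is arbitrary):

```python
from configuration_mlp import MLPConfig

config = MLPConfig(hidden_size=128)
config.save_pretrained("./mlp-checkpoint")
# ./mlp-checkpoint/config.json now contains
#   "auto_map": {"AutoConfig": "configuration_mlp.MLPConfig"}
# and configuration_mlp.py is copied alongside it.
```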
modeling_mlp.py ADDED
@@ -0,0 +1,22 @@
+ import torch
+
+ from transformers import PreTrainedModel
+
+ from .configuration_mlp import MLPConfig
+
+
+ class MLP(PreTrainedModel):
+     config_class = MLPConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.input_layer = torch.nn.Linear(config.input_size, config.hidden_size)
+         self.mid_layer = torch.nn.Linear(config.hidden_size, config.hidden_size)
+         self.output_layer = torch.nn.Linear(config.hidden_size, config.output_size)
+
+     def forward(self, inputs):
+         # inputs: (batch_size, input_size) flattened, normalized images
+         x = torch.nn.functional.relu(self.input_layer(inputs))
+         x = torch.nn.functional.relu(self.mid_layer(x))
+         # return class probabilities rather than raw logits
+         return torch.nn.functional.softmax(self.output_layer(x), dim=-1)
+
+
+ # Register so that AutoModel resolves to this class (recorded in auto_map).
+ MLP.register_for_auto_class("AutoModel")
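A quick sanity check of the architecture with the two classes above in scope (the batch size is arbitrary):

```python
import torch

config = MLPConfig()          # defaults: input_size=784, hidden_size=256, output_size=10
model = MLP(config).eval()

batch = torch.randn(4, config.input_size)   # four flattened 28x28 "images"
with torch.no_grad():
    probs = model(batch)

assert probs.shape == (4, config.output_size)
assert torch.allclose(probs.sum(dim=-1), torch.ones(4))  # rows are probability distributions
```

The parameter count (784·256 + 256 for the input layer, 256·256 + 256 for the mid layer, 256·10 + 10 for the output layer, i.e. 269,322 float32 values ≈ 1.08 MB) is consistent with the ~1.08 MB `pytorch_model.bin` below.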
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26300c9e4b676ba94c7773ee82de3bb2831b9fd45cc1202b65bc94a6e4db4fa5
+ size 1079431