MLFarmer committed on
Commit
79ba3b2
1 Parent(s): ae9b83d

Upload model

Browse files
Files changed (4) hide show
  1. config.json +26 -0
  2. configuration_my.py +35 -0
  3. modeling_my.py +15 -0
  4. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "MyModel"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "configuration_my.MyConfig",
7
+ "AutoModel": "modeling_my.MyModel"
8
+ },
9
+ "avg_down": false,
10
+ "base_width": 64,
11
+ "block_type": "bottleneck",
12
+ "cardinality": 1,
13
+ "input_channels": 3,
14
+ "layers": [
15
+ 3,
16
+ 4,
17
+ 6,
18
+ 3
19
+ ],
20
+ "model_type": "my",
21
+ "num_classes": 1000,
22
+ "stem_type": "",
23
+ "stem_width": 64,
24
+ "torch_dtype": "float32",
25
+ "transformers_version": "4.28.1"
26
+ }
configuration_my.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+ from typing import List
3
+
4
+
5
class MyConfig(PretrainedConfig):
    """Configuration for ``MyModel``, describing a ResNet-style architecture.

    Holds the structural hyper-parameters (block type, stage depths, stem
    layout, classifier size) that a model implementation would consume.

    Args:
        block_type: Residual block variant; must be ``"basic"`` or
            ``"bottleneck"``.
        layers: Number of blocks in each of the four stages.
        num_classes: Size of the classification head.
        input_channels: Number of channels in the input images.
        cardinality: Number of convolution groups (ResNeXt-style).
        base_width: Base width of the bottleneck channels.
        stem_width: Number of channels in the stem convolution(s).
        stem_type: Stem layout; must be ``""``, ``"deep"`` or
            ``"deep-tiered"``.
        avg_down: Whether to use average pooling for the downsampling path.
        **kwargs: Forwarded to :class:`~transformers.PretrainedConfig`.

    Raises:
        ValueError: If ``block_type`` or ``stem_type`` is not one of the
            accepted values.
    """

    model_type = "my"

    def __init__(
        self,
        block_type="bottleneck",
        layers: List[int] = [3, 4, 6, 3],
        num_classes: int = 1000,
        input_channels: int = 3,
        cardinality: int = 1,
        base_width: int = 64,
        stem_width: int = 64,
        stem_type: str = "",
        avg_down: bool = False,
        **kwargs,
    ):
        # Validate enum-like string arguments before storing anything.
        # Fix: the original message read "'basic' or bottleneck'" (missing
        # opening quote on the second option).
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")

        self.block_type = block_type
        # Copy defensively: the default list is a single shared object, so
        # storing it by reference would let one instance's mutation leak into
        # every other instance that used the default.
        self.layers = list(layers)
        self.num_classes = num_classes
        self.input_channels = input_channels
        self.cardinality = cardinality
        self.base_width = base_width
        self.stem_width = stem_width
        self.stem_type = stem_type
        self.avg_down = avg_down
        super().__init__(**kwargs)
modeling_my.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PreTrainedModel
2
+ from .configuration_my import MyConfig
3
+ import torch
4
+
5
+
6
class MyModel(PreTrainedModel):
    """Minimal example model: a single linear projection behind the
    ``transformers`` ``PreTrainedModel`` interface.

    Uses :class:`MyConfig` as its configuration class; the config is only
    consumed by the base-class initialisation here.
    """

    config_class = MyConfig

    def __init__(self, config):
        """Initialise the base class and build the toy backbone."""
        super().__init__(config)

        # Toy backbone: one 10 -> 2 affine projection.
        projection = torch.nn.Linear(10, 2)
        self.model = projection

    def forward(self, tensor):
        """Apply the linear projection to ``tensor`` and return the result."""
        output = self.model(tensor)
        return output
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f46a070f4037a52391eaf6c3afb2638ed6e4ffd2719d26dc1a77e1535037c3a1
3
+ size 1087