rmihaylov committed
Commit
54e216c
1 Parent(s): 1a64033
Files changed (3)
  1. config.json +31 -0
  2. modeling_roberta.py +13 -0
  3. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "/content/drive/MyDrive/ColabModels/XROBERTA_SST2_SMALL/pytorch_model/",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "auto_map": {
+     "AutoModel": "modeling_roberta.XLMRobertaForSequenceClassification"
+   },
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "roberta": 1,
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 88361
+ }
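
Note: because the config registers the custom class under `auto_map`, `AutoModel` can resolve it directly. A minimal loading sketch, assuming a hypothetical Hub repo id (substitute the real one) and that executing the repo's custom code is acceptable:

```python
from transformers import AutoModel

# Hypothetical repo id -- replace with the actual Hub repository containing this commit.
model = AutoModel.from_pretrained(
    "rmihaylov/xroberta-sst2-small",
    trust_remote_code=True,  # lets auto_map dispatch to modeling_roberta.XLMRobertaForSequenceClassification
)
```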
modeling_roberta.py ADDED
@@ -0,0 +1,13 @@
+ from torch import nn
+
+ from transformers import XLMRobertaModel
+
+
+ class XLMRobertaForSequenceClassification(XLMRobertaModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.classifier = nn.Linear(768, 2)  # 768 = hidden_size from config.json; 2 SST-2 classes
+
+     def forward(self, input_ids, attention_mask):
+         outputs = super().forward(input_ids=input_ids, attention_mask=attention_mask)
+         return self.classifier(outputs[1])  # outputs[1] is the encoder's pooled output
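
Note: the subclass returns raw logits from the pooled output, so a forward pass needs only `input_ids` and `attention_mask`. A minimal sketch with randomly initialized weights and dummy token ids (the repo's tokenizer is custom, `vocab_size` 88361, and is not part of this commit; the local file paths below are assumptions):

```python
import torch
from transformers import XLMRobertaConfig

from modeling_roberta import XLMRobertaForSequenceClassification  # assumes the file above is importable

# Build an untrained instance from the config above; real use would load the
# checkpoint with from_pretrained instead.
config = XLMRobertaConfig.from_pretrained("config.json")
model = XLMRobertaForSequenceClassification(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (1, 16))  # dummy ids stand in for real tokens
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    logits = model(input_ids=input_ids, attention_mask=attention_mask)

print(logits.shape)  # torch.Size([1, 2]) -- one logit per SST-2 class
```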
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f29b1c8bfd0655d557fb39fed07e9461c55ba2d436f891aaac54b4e9975b8fdb
+ size 445547373
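
Note: this file is a Git LFS pointer, not the weights themselves; the repo records only the spec version, the SHA-256 object id, and the byte size (~445 MB). A minimal sketch for verifying a downloaded copy against the pointer's oid:

```python
import hashlib

EXPECTED_OID = "f29b1c8bfd0655d557fb39fed07e9461c55ba2d436f891aaac54b4e9975b8fdb"

# Stream in 1 MiB chunks so the ~445 MB checkpoint never sits in memory at once.
digest = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "checksum mismatch with the LFS pointer"
```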