techthiyanes committed on
Commit adce696
1 Parent(s): a3888e3
.gitattributes DELETED
@@ -1,16 +0,0 @@
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tar.gz filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
Bert_Bahasa_Sentiment ADDED
@@ -0,0 +1 @@
+ Subproject commit a3888e36909329469565a9c26a07e3d32e29d8f4
README.md DELETED
@@ -1,17 +0,0 @@
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
- tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
-
- model = AutoModelForSequenceClassification.from_pretrained('techthiyanes/Bert_Bahasa_Sentiment')
-
- inputs = tokenizer("saya tidak", return_tensors="pt")
-
- labels = torch.tensor([1]).unsqueeze(0)
-
- outputs = model(**inputs, labels=labels)
-
- loss = outputs.loss
-
- logits = outputs.logits
-
- outputs
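
Note: the deleted README snippet above is not runnable as written — `model_checkpoint` is never defined and `torch` is never imported. A minimal corrected sketch, assuming the tokenizer is loaded from the same `techthiyanes/Bert_Bahasa_Sentiment` checkpoint as the model:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Assumption: the tokenizer ships alongside the model under the same checkpoint name.
model_checkpoint = "techthiyanes/Bert_Bahasa_Sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)

# Tokenize an example sentence and run a forward pass with a dummy label.
inputs = tokenizer("saya tidak", return_tensors="pt")
labels = torch.tensor([1]).unsqueeze(0)  # shape (1, 1), one label per batch item
outputs = model(**inputs, labels=labels)

loss = outputs.loss      # cross-entropy loss against the dummy label
logits = outputs.logits  # raw scores over the sentiment labels
print(loss.item(), logits)
```

Since the config below defines three labels (LABEL_0 to LABEL_2), `logits` comes out with shape (1, 3).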
config.json DELETED
@@ -1,40 +0,0 @@
- {
-   "_name_or_path": "test-idtweet/checkpoint-4680",
-   "architectures": [
-     "BertForSequenceClassification"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "directionality": "bidi",
-   "gradient_checkpointing": false,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 768,
-   "id2label": {
-     "0": "LABEL_0",
-     "1": "LABEL_1",
-     "2": "LABEL_2"
-   },
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
-   "label2id": {
-     "LABEL_0": 0,
-     "LABEL_1": 1,
-     "LABEL_2": 2
-   },
-   "layer_norm_eps": 1e-12,
-   "max_position_embeddings": 512,
-   "model_type": "bert",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 12,
-   "pad_token_id": 0,
-   "pooler_fc_size": 768,
-   "pooler_num_attention_heads": 12,
-   "pooler_num_fc_layers": 3,
-   "pooler_size_per_head": 128,
-   "pooler_type": "first_token_transform",
-   "position_embedding_type": "absolute",
-   "transformers_version": "4.5.1",
-   "type_vocab_size": 2,
-   "use_cache": true,
-   "vocab_size": 105879
- }
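
If a copy of this deleted config.json is kept locally, it can still be loaded and inspected without downloading any weights; a sketch assuming the file is saved as `./config.json`:

```python
from transformers import AutoConfig

# Assumption: a local copy of the deleted config.json saved next to this script.
config = AutoConfig.from_pretrained("./config.json")

print(config.model_type)                             # "bert"
print(config.hidden_size, config.num_hidden_layers)  # 768, 12
print(config.id2label)                               # {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
print(config.vocab_size)                             # 105879
```

The three-entry `id2label`/`label2id` maps are what make this a three-class sentiment head, and the 105879-token vocabulary is consistent with a `bert-base-multilingual-uncased` backbone.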
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6becaa1830ccf54948ecf1a52b34fcf5ee7a09aeef374e25c2be032f6d17a92e
- size 669524233
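
The three lines above are a Git LFS pointer, not the model weights themselves: the real 669,524,233-byte file is addressed by its SHA-256 object ID. A small sketch of reading such a pointer file, assuming a hypothetical local copy named `pytorch_model.bin` that still contains the pointer text:

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into a dict of its space-separated key/value fields."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = parse_lfs_pointer("pytorch_model.bin")  # hypothetical local pointer file
print(pointer["oid"])   # "sha256:6becaa18..."
print(pointer["size"])  # "669524233" (bytes, roughly 670 MB)
```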
special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-multilingual-uncased"}
training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8642cb3aa8bf685452389f857949be08dae40c901e8bedb8ff26c9ad774001e2
- size 2351
vocab.txt DELETED
The diff for this file is too large to render. See raw diff