system HF staff committed on
Commit b8f54d0 · 1 parent: 8297a80

Commit From AutoTrain

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
+---
+tags: autotrain
+language: ar
+widget:
+- text: "I love AutoTrain 🤗"
+datasets:
+- Yah216/autotrain-data-Poem_Rawiy_detection
+co2_eq_emissions: 1.8046766441629636
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Multi-class Classification
+- Model ID: 918730150
+- CO2 Emissions (in grams): 1.8046766441629636
+
+## Validation Metrics
+
+- Loss: 0.398613303899765
+- Accuracy: 0.912351981006084
+- Macro F1: 0.717311758991278
+- Micro F1: 0.912351981006084
+- Weighted F1: 0.9110094798809955
+- Macro Precision: 0.7211917136609866
+- Micro Precision: 0.912351981006084
+- Weighted Precision: 0.9102294701380585
+- Macro Recall: 0.714852045042265
+- Micro Recall: 0.912351981006084
+- Weighted Recall: 0.912351981006084
+
+
+## Usage
+
+You can use cURL to access this model:
+
+```
+$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Yah216/autotrain-Poem_Rawiy_detection-918730150
+```
+
+Or Python API:
+
+```
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+model = AutoModelForSequenceClassification.from_pretrained("Yah216/autotrain-Poem_Rawiy_detection-918730150", use_auth_token=True)
+
+tokenizer = AutoTokenizer.from_pretrained("Yah216/autotrain-Poem_Rawiy_detection-918730150", use_auth_token=True)
+
+inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+outputs = model(**inputs)
+```
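The class ids the model returns map to Arabic rawiy (rhyme letter) labels through the id2label table in the config.json added below. As a minimal sketch (not part of the committed README), continuing from the `outputs` variable in the snippet above:

```
# Assumes `model`, `inputs` and `outputs` from the README snippet above.
predicted_id = outputs.logits.argmax(dim=-1).item()

# id2label comes from config.json; transformers converts its keys to ints
# when the config is loaded, so integer indexing works here.
print(predicted_id, model.config.id2label[predicted_id])
```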
config.json ADDED
@@ -0,0 +1,109 @@
+{
+  "_name_or_path": "AutoTrain",
+  "_num_labels": 35,
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "\u0621",
+    "1": "\u0624",
+    "2": "\u0627",
+    "3": "\u0628",
+    "4": "\u062a",
+    "5": "\u062b",
+    "6": "\u062c",
+    "7": "\u062d",
+    "8": "\u062e",
+    "9": "\u062f",
+    "10": "\u0630",
+    "11": "\u0631",
+    "12": "\u0632",
+    "13": "\u0633",
+    "14": "\u0634",
+    "15": "\u0635",
+    "16": "\u0636",
+    "17": "\u0637",
+    "18": "\u0637\u0646",
+    "19": "\u0638",
+    "20": "\u0639",
+    "21": "\u063a",
+    "22": "\u0641",
+    "23": "\u0642",
+    "24": "\u0643",
+    "25": "\u0644",
+    "26": "\u0644\u0627",
+    "27": "\u0645",
+    "28": "\u0646",
+    "29": "\u0647",
+    "30": "\u0647\u0640",
+    "31": "\u0647\u0646",
+    "32": "\u0648",
+    "33": "\u0649",
+    "34": "\u064a"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "\u0621": 0,
+    "\u0624": 1,
+    "\u0627": 2,
+    "\u0628": 3,
+    "\u062a": 4,
+    "\u062b": 5,
+    "\u062c": 6,
+    "\u062d": 7,
+    "\u062e": 8,
+    "\u062f": 9,
+    "\u0630": 10,
+    "\u0631": 11,
+    "\u0632": 12,
+    "\u0633": 13,
+    "\u0634": 14,
+    "\u0635": 15,
+    "\u0636": 16,
+    "\u0637": 17,
+    "\u0637\u0646": 18,
+    "\u0638": 19,
+    "\u0639": 20,
+    "\u063a": 21,
+    "\u0641": 22,
+    "\u0642": 23,
+    "\u0643": 24,
+    "\u0644": 25,
+    "\u0644\u0627": 26,
+    "\u0645": 27,
+    "\u0646": 28,
+    "\u0647": 29,
+    "\u0647\u0640": 30,
+    "\u0647\u0646": 31,
+    "\u0648": 32,
+    "\u0649": 33,
+    "\u064a": 34
+  },
+  "layer_norm_eps": 1e-12,
+  "max_length": 64,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "padding": "max_length",
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.15.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 50000
+}
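The label tables above are what transformers exposes at runtime. As a small illustration (not part of the commit), the configuration alone can be fetched and inspected; AutoConfig downloads only config.json:

```
from transformers import AutoConfig

# Fetches just config.json, not the model weights; add
# use_auth_token=True (as in the README) if the repo is private.
config = AutoConfig.from_pretrained(
    "Yah216/autotrain-Poem_Rawiy_detection-918730150"
)

print(config.num_labels)    # 35 rawiy classes
print(config.id2label[18])  # one of the two-character rhyme labels
```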
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a4114d4ceff1db412443e3d3c7e9b63072cf6cab13898809d10f15071d087a5
+size 497957165
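The file is committed as a Git LFS pointer: only the spec version, the SHA-256 oid, and the byte size live in git, while the ~498 MB weight blob is fetched from LFS storage on checkout. A minimal sketch (an assumption, not part of the commit) for verifying a downloaded copy against the oid above:

```
import hashlib

# Hash the downloaded weights in 1 MiB chunks so the ~498 MB file
# never has to fit in memory at once.
sha256 = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# The expected digest is the `oid sha256:` value from the pointer file.
expected = "4a4114d4ceff1db412443e3d3c7e9b63072cf6cab13898809d10f15071d087a5"
print("match" if sha256.hexdigest() == expected else "checksum mismatch")
```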
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:793aeb731778dc8d41c79d543ad2545f501e7b2a4e3f4644dac9c98ea4b37a75
+size 2848
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/app/.cache/huggingface/transformers/ced2588a523d32e846c16950f459b5d40bf5cbc9bef24db9c56fc52e2cf2e7ac.6a644b330fad284f98393c5832f71bce43df6855fa6ac7c9e44ed6271b708170", "name_or_path": "AutoTrain", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff