system HF staff committed on
Commit
8da6038
1 Parent(s): 5c64685

Commit From AutoTrain

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ tags: autotrain
+ language: ar
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - zenkri/autotrain-data-Arabic_Poetry_by_Subject-1d8ba412
+ co2_eq_emissions: 0.06170374019107819
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Multi-class Classification
+ - Model ID: 920730227
+ - CO2 Emissions (in grams): 0.06170374019107819
+
+ ## Validation Metrics
+
+ - Loss: 0.5905918478965759
+ - Accuracy: 0.8687837028160575
+ - Macro F1: 0.7777187122151491
+ - Micro F1: 0.8687837028160575
+ - Weighted F1: 0.8673230166815299
+ - Macro Precision: 0.796117563625016
+ - Micro Precision: 0.8687837028160575
+ - Weighted Precision: 0.8692944353097692
+ - Macro Recall: 0.7732013751753718
+ - Micro Recall: 0.8687837028160575
+ - Weighted Recall: 0.8687837028160575
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```bash
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/zenkri/autotrain-Arabic_Poetry_by_Subject-920730227
+ ```
+
+ Or the Python API:
+
+ ```python
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("zenkri/autotrain-Arabic_Poetry_by_Subject-920730227", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("zenkri/autotrain-Arabic_Poetry_by_Subject-920730227", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
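Not part of the original README, but a minimal sketch of how the snippet above could be extended to map the raw logits to one of the 25 subject labels defined in config.json. It assumes the repository is publicly readable (so no auth token is needed) and that torch is installed.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "zenkri/autotrain-Arabic_Poetry_by_Subject-920730227"
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# Tokenize one example and run a forward pass without tracking gradients.
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Highest-scoring class id, mapped to its Arabic subject name via id2label.
predicted_id = outputs.logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])
```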
config.json ADDED
@@ -0,0 +1,85 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 25,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "\u0627\u0628\u062a\u0647\u0627\u0644",
+     "1": "\u0627\u0639\u062a\u0630\u0627\u0631",
+     "2": "\u0627\u0644\u0627\u0646\u0627\u0634\u064a\u062f",
+     "3": "\u0627\u0644\u0645\u0639\u0644\u0642\u0627\u062a",
+     "4": "\u062c\u0648\u062f",
+     "5": "\u062d\u0632\u064a\u0646\u0647",
+     "6": "\u062d\u0643\u0645\u0629",
+     "7": "\u062f\u064a\u0646\u064a\u0629",
+     "8": "\u0630\u0645",
+     "9": "\u0631\u062b\u0627\u0621",
+     "10": "\u0631\u062d\u0645\u0629",
+     "11": "\u0631\u0648\u0645\u0646\u0633\u064a\u0647",
+     "12": "\u0633\u064a\u0627\u0633\u064a\u0629",
+     "13": "\u0634\u0648\u0642",
+     "14": "\u0635\u0628\u0631",
+     "15": "\u0639\u0627\u0645\u0647",
+     "16": "\u0639\u062a\u0627\u0628",
+     "17": "\u0639\u062f\u0644",
+     "18": "\u063a\u0632\u0644",
+     "19": "\u0641\u0631\u0627\u0642",
+     "20": "\u0642\u0635\u064a\u0631\u0647",
+     "21": "\u0645\u062f\u062d",
+     "22": "\u0646\u0635\u064a\u062d\u0629",
+     "23": "\u0647\u062c\u0627\u0621",
+     "24": "\u0648\u0637\u0646\u064a\u0647"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "\u0627\u0628\u062a\u0647\u0627\u0644": 0,
+     "\u0627\u0639\u062a\u0630\u0627\u0631": 1,
+     "\u0627\u0644\u0627\u0646\u0627\u0634\u064a\u062f": 2,
+     "\u0627\u0644\u0645\u0639\u0644\u0642\u0627\u062a": 3,
+     "\u062c\u0648\u062f": 4,
+     "\u062d\u0632\u064a\u0646\u0647": 5,
+     "\u062d\u0643\u0645\u0629": 6,
+     "\u062f\u064a\u0646\u064a\u0629": 7,
+     "\u0630\u0645": 8,
+     "\u0631\u062b\u0627\u0621": 9,
+     "\u0631\u062d\u0645\u0629": 10,
+     "\u0631\u0648\u0645\u0646\u0633\u064a\u0647": 11,
+     "\u0633\u064a\u0627\u0633\u064a\u0629": 12,
+     "\u0634\u0648\u0642": 13,
+     "\u0635\u0628\u0631": 14,
+     "\u0639\u0627\u0645\u0647": 15,
+     "\u0639\u062a\u0627\u0628": 16,
+     "\u0639\u062f\u0644": 17,
+     "\u063a\u0632\u0644": 18,
+     "\u0641\u0631\u0627\u0642": 19,
+     "\u0642\u0635\u064a\u0631\u0647": 20,
+     "\u0645\u062f\u062d": 21,
+     "\u0646\u0635\u064a\u062d\u0629": 22,
+     "\u0647\u062c\u0627\u0621": 23,
+     "\u0648\u0637\u0646\u064a\u0647": 24
+   },
+   "layer_norm_eps": 1e-12,
+   "max_length": 64,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "padding": "max_length",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
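Not part of the commit itself: one hedged way to sanity-check the label mapping above once the model is published is to load only the configuration with transformers' AutoConfig and print id2label (repository id taken from the README; assumes public read access).

```python
from transformers import AutoConfig

# Download only config.json and inspect the 25 subject labels it declares.
config = AutoConfig.from_pretrained("zenkri/autotrain-Arabic_Poetry_by_Subject-920730227")
print(config.num_labels)                  # 25
for class_id in sorted(config.id2label):  # keys are the integer class ids
    print(class_id, config.id2label[class_id])
```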
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6c57e30a3bb6a79081f724b4bbc846de6d190aef27823878ee9f820fef8e255
+ size 442630381
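The three added lines above are a Git LFS pointer, not the weights themselves; the actual ~442 MB checkpoint lives in LFS storage. If the repository is cloned without LFS, one possible way to fetch the file is via huggingface_hub (a sketch, not part of the commit; assumes the huggingface_hub package is installed and the repo is public).

```python
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer: download the real pytorch_model.bin from the Hub
# and return the path of the locally cached copy.
weights_path = hf_hub_download(
    repo_id="zenkri/autotrain-Arabic_Poetry_by_Subject-920730227",
    filename="pytorch_model.bin",
)
print(weights_path)
```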
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:734356c306b4018302eb2904640755c58c7a8f0d22ea34a909bba550fd9ed542
+ size 2848
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "AutoTrain", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
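For completeness, a hedged sketch of how this lower-casing BertTokenizer combines with the max_length=64 and padding="max_length" settings from config.json at inference time (the example text is illustrative; AutoTokenizer is assumed to resolve to the BertTokenizer declared above).

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("zenkri/autotrain-Arabic_Poetry_by_Subject-920730227")

# Encode one input the way the training config describes: lower-cased WordPiece,
# padded or truncated to 64 tokens.
encoded = tokenizer(
    "I love AutoTrain",
    padding="max_length",
    truncation=True,
    max_length=64,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # torch.Size([1, 64])
```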
vocab.txt ADDED
The diff for this file is too large to render. See raw diff