q3fer committed
Commit b5b1bad
1 Parent(s): e706775

Upload Tensorflow model

Files changed (3):
  1. README.md +45 -1
  2. config.json +56 -0
  3. tf_model.h5 +3 -0
README.md CHANGED
@@ -1,3 +1,47 @@
 ---
-license: mit
+tags:
+- generated_from_keras_callback
+model-index:
+- name: organon-fallacy-classification
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information Keras had access to. You should
+probably proofread and complete it, then remove this comment. -->
+
+# organon-fallacy-classification
+
+This model is a fine-tuned version of [q3fer/fallacy_classifier_01](https://huggingface.co/q3fer/fallacy_classifier_01) on an unknown dataset.
+It achieves the following results on the evaluation set:
+
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- optimizer: None
+- training_precision: float32
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.25.1
+- TensorFlow 2.9.2
+- Datasets 2.8.0
+- Tokenizers 0.13.2
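
Since the card itself is still a stub, a minimal usage sketch may help (not part of the commit): it assumes the uploaded weights load through the standard `transformers` TF auto classes, and that the repo lives at `q3fer/organon-fallacy-classification`, inferred from the model-index name above; the actual Hub path may differ.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Assumed repo id, inferred from the model-index name in the card above.
repo_id = "q3fer/organon-fallacy-classification"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFAutoModelForSequenceClassification.from_pretrained(repo_id)

# Classify one sentence; id2label (see config.json below) names the fallacy.
inputs = tokenizer("Everyone believes it, so it must be true.",
                   return_tensors="tf")
logits = model(**inputs).logits
pred = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[pred])  # e.g. "ad populum"
```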
config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "_name_or_path": "q3fer/fallacy_classifier_01",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "ad hominem",
+    "1": "ad populum",
+    "2": "appeal to emotion",
+    "3": "circular reasoning",
+    "4": "equivocation",
+    "5": "fallacy of credibility",
+    "6": "fallacy of extension",
+    "7": "fallacy of logic",
+    "8": "fallacy of relevance",
+    "9": "false causality",
+    "10": "false dilemma",
+    "11": "faulty generalization",
+    "12": "intentional",
+    "13": "miscellaneous"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "ad hominem": 0,
+    "ad populum": 1,
+    "appeal to emotion": 2,
+    "circular reasoning": 3,
+    "equivocation": 4,
+    "fallacy of credibility": 5,
+    "fallacy of extension": 6,
+    "fallacy of logic": 7,
+    "fallacy of relevance": 8,
+    "false causality": 9,
+    "false dilemma": 10,
+    "faulty generalization": 11,
+    "intentional": 12,
+    "miscellaneous": 13
+  },
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "vocab_size": 30522
+}
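
The `id2label` / `label2id` pair is what maps the classifier's 14 output indices to fallacy names. A small sketch of reading it back with `AutoConfig` (repo id again an assumption, as above):

```python
from transformers import AutoConfig

# Assumed repo id; AutoConfig fetches and parses the config.json above.
config = AutoConfig.from_pretrained("q3fer/organon-fallacy-classification")

print(config.model_type)    # "distilbert"
print(config.num_labels)    # 14, derived from id2label
print(config.id2label[10])  # "false dilemma" (keys become ints on load)
```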
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4157a11b18f429cabd361d304881c8c3fe1f34b498a4cedeb3bb45c9dfbbd340
+size 267993256
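
The three lines above are a Git LFS pointer, not the weights themselves: the ~268 MB HDF5 file is stored out of band, and its SHA-256 is recorded as the pointer's `oid`. A sketch of fetching the real file with `huggingface_hub` and checking the hash against the pointer (repo id assumed as before):

```python
import hashlib
from huggingface_hub import hf_hub_download

# Assumed repo id; hf_hub_download resolves the LFS pointer to the real file.
path = hf_hub_download("q3fer/organon-fallacy-classification", "tf_model.h5")

# The downloaded file's sha256 should equal the "oid" in the pointer above.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() ==
      "4157a11b18f429cabd361d304881c8c3fe1f34b498a4cedeb3bb45c9dfbbd340")
```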