File size: 3,269 Bytes
6c8a3a5
c7d819f
6c8a3a5
c7d819f
 
41ff77b
cb958fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6c8a3a5
 
c7d819f
6f40821
9f5517c
c7d819f
 
6c8a3a5
 
cb958fb
6c8a3a5
 
cb958fb
6c8a3a5
 
 
 
cb958fb
 
6c8a3a5
cb958fb
 
 
 
6c8a3a5
 
 
cb958fb
6c8a3a5
 
cb958fb
 
 
 
 
6c8a3a5
cb958fb
 
 
 
6c8a3a5
 
 
9f5517c
 
6c8a3a5
cb958fb
 
 
6c8a3a5
 
 
b0f3e5c
6c8a3a5
b0f3e5c
 
 
 
 
 
 
606e111
 
b0f3e5c
 
 
 
41ff77b
b0f3e5c
 
 
 
 
6c8a3a5
b0f3e5c
6c8a3a5
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import os
import json
from typing import Dict


# Example tweet embedded in the generated README's usage snippet.
# "{{URL}}"/"{{USERNAME}}" are literal double-brace placeholders and "{@...@}"
# marks a mention — presumably the dataset's preprocessing tokens (TODO confirm).
sample = "#NewVideo Cray Dollas- Water- Ft. Charlie Rose- (Official Music Video)- {{URL}} via {@YouTube@} #watchandlearn {{USERNAME}}"
# BibTeX entry for the TweetTopic paper, rendered in the README's Reference section.
bib = """
@inproceedings{dimosthenis-etal-2022-twitter,
    title = "{T}witter {T}opic {C}lassification",
    author = "Antypas, Dimosthenis  and
    Ushio, Asahi  and
    Camacho-Collados, Jose  and
    Neves, Leonardo  and
    Silva, Vitor  and
    Barbieri, Francesco",
    booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
    month = oct,
    year = "2022",
    address = "Gyeongju, Republic of Korea",
    publisher = "International Committee on Computational Linguistics"
}
"""


def get_readme(model_name: str,
               metric: str,
               language_model: str,
               extra_desc: str = '') -> str:
    """Build the Hugging Face model-card (README.md) text for a fine-tuned model.

    Args:
        model_name: Hub id of the fine-tuned model (e.g. "cardiffnlp/...").
        metric: Path to a JSON file with the test metrics; must contain the
            keys 'test/eval_f1', 'test/eval_f1_macro' and 'test/eval_accuracy'.
        language_model: Hub id of the base language model that was fine-tuned.
        extra_desc: Optional extra sentence appended to the model description.

    Returns:
        The complete README markdown, including the YAML metadata front matter.
    """
    # Load into a separate name instead of shadowing the `metric` (file path)
    # parameter, which the original code overwrote after opening the file.
    with open(metric) as f:
        evaluation = json.load(f)
    # NOTE: the usage snippet below quotes {sample} so the generated example is
    # valid Python (the unquoted interpolation produced a syntax error).
    return f"""---
datasets:
- cardiffnlp/tweet_topic_multi
metrics:
- f1
- accuracy
model-index:
- name: {model_name}
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: cardiffnlp/tweet_topic_multi
      type: cardiffnlp/tweet_topic_multi
      args: cardiffnlp/tweet_topic_multi
      split: test_2021 
    metrics:
    - name: F1
      type: f1
      value: {evaluation['test/eval_f1']}
    - name: F1 (macro)
      type: f1_macro
      value: {evaluation['test/eval_f1_macro']}
    - name: Accuracy
      type: accuracy
      value: {evaluation['test/eval_accuracy']}
pipeline_tag: text-classification
widget:
- text: "I'm sure the {"{@Tampa Bay Lightning@}"} would’ve rather faced the Flyers but man does their experience versus the Blue Jackets this year and last help them a lot versus this Islanders team. Another meat grinder upcoming for the good guys"
  example_title: "Example 1"
- text: "Love to take night time bike rides at the jersey shore. Seaside Heights boardwalk. Beautiful weather. Wishing everyone a safe Labor Day weekend in the US." 
  example_title: "Example 2"
---
# {model_name}

This model is a fine-tuned version of [{language_model}](https://huggingface.co/{language_model}) on the [tweet_topic_multi](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi). {extra_desc}
Fine-tuning script can be found [here](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi/blob/main/lm_finetuning.py). It achieves the following results on the test_2021 set:

- F1 (micro): {evaluation['test/eval_f1']}
- F1 (macro): {evaluation['test/eval_f1_macro']}
- Accuracy: {evaluation['test/eval_accuracy']}


### Usage

```python
import math
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def sigmoid(x):
  return 1 / (1 + math.exp(-x))
  
tokenizer = AutoTokenizer.from_pretrained("{model_name}")
model = AutoModelForSequenceClassification.from_pretrained("{model_name}", problem_type="multi_label_classification")
model.eval()
class_mapping = model.config.id2label

with torch.no_grad():
  text = "{sample}"
  tokens = tokenizer(text, return_tensors='pt')
  output = model(**tokens)
  flags = [sigmoid(s) > 0.5 for s in output[0][0].detach().tolist()]
  topic = [class_mapping[n] for n, i in enumerate(flags) if i]
print(topic)
```

### Reference

```
{bib}
```
"""