import pandas as pd
import urllib.request


# normalise whitespace, mask mentions of non-verified users as '@user' and
# URLs as '{URL}' (relies on the module-level verified_users set defined below,
# which is bound before clean_text is first applied)
def clean_text(text):
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    new_text = []
    for t in text.split():
        # mask the mention unless the handle belongs to a verified user
        # (comparison is case-insensitive)
        t = '@user' if t.startswith('@') and len(t) > 1 and t.replace(
            '@', '').lower() not in verified_users else t
        t = '{URL}' if t.startswith('http') else t
        new_text.append(t)

    return ' '.join(new_text)
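
# Example of the intended behaviour (hypothetical input; assumes 'nasa' is in
# verified_users while 'someone' is not):
#   clean_text('Wow @NASA look https://t.co/abc\n@someone')
#   -> 'Wow @NASA look {URL} @user'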


# SemEval-2018 Task 1 (E-c) English splits, tab-separated
train = pd.read_csv('./emotion/2018-E-c-En-train.txt', sep='\t')
validation = pd.read_csv('./emotion/2018-E-c-En-dev.txt', sep='\t')
test = pd.read_csv('./emotion/2018-E-c-En-test-gold.txt', sep='\t')

sem_emotions = train.columns.difference(['ID', 'Tweet', 'split', 'dataset'])

# save the label -> index mapping; columns.difference returns the emotion names
# sorted alphabetically, so the indices follow that order
with open('../data/tweet_emotion/map.txt', 'w') as f:
    for idx, em in enumerate(sem_emotions):
        f.write(f'{em},{idx}\n')


cols_to_keep = ['text', 'gold_label_list']
# list of verified users published by TimeLMs (one username per line),
# lowercased and stored in a set for case-insensitive membership checks
verified_users = urllib.request.urlopen(
    'https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
verified_users = {x.decode().strip('\n').lower() for x in verified_users}

# build the text and label columns for each split and export as JSONL
train['gold_label_list'] = train[sem_emotions].values.tolist()
train['text'] = train['Tweet']
train['text'] = train['text'].apply(clean_text)
train[cols_to_keep].to_json('../data/tweet_emotion/train.jsonl', lines=True, orient='records')

validation['gold_label_list'] = validation[sem_emotions].values.tolist()
validation['text'] = validation['Tweet']
validation['text'] = validation['text'].apply(clean_text)
validation[cols_to_keep].to_json('../data/tweet_emotion/validation.jsonl', lines=True, orient='records')

test['gold_label_list'] = test[sem_emotions].values.tolist()
test['text'] = test['Tweet']
test['text'] = test['text'].apply(clean_text)
test[cols_to_keep].to_json('../data/tweet_emotion/test.jsonl', lines=True, orient='records')
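

# Optional sanity check (a minimal sketch, assuming the files above were written
# successfully): reload the exported training split and confirm that every
# example carries one binary label per emotion listed in map.txt.
check = pd.read_json('../data/tweet_emotion/train.jsonl', lines=True)
assert check['gold_label_list'].str.len().eq(len(sem_emotions)).all()
print(f'{len(check)} training examples, {len(sem_emotions)} labels each')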