from itertools import groupby
from string import punctuation
import html
import re
import unicodedata

import fasttext
from fastcore.basics import listify

# runs of carriage returns, newlines, and tabs
control_char_regex = re.compile(r'[\r\n\t]+')
# loose URL matcher; the scheme is optional
url_regex = re.compile(
    r'((http|https)\:\/\/)?[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*')
# Twitter-style @handles: up to 15 word characters, not preceded by '@' or a word character
username_regex = re.compile(r'(^|[^@\w])@(\w{1,15})\b')

# fastText language-identification model; lid.176.bin can be downloaded from
# https://fasttext.cc/docs/en/language-identification.html
FASTTEXT_MODEL_PATH = 'lid.176.bin'
fasttext_model = fasttext.load_model(FASTTEXT_MODEL_PATH)


def fix_html(example):
    "From fastai: 'Fix messy things we've seen in documents'"
    tmp_ls = []
    for e in listify(example['text']):
        e = e.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
            '#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
            '\\"', '"').replace('<unk>', ' ').replace(' @.@ ', '.').replace(' @-@ ', '-').replace('...', ' …')
        tmp_ls.append(html.unescape(e))

    example['text'] = tmp_ls
    return example


def remove_control_char(example):
    "Replace runs of carriage returns, newlines, and tabs with a full stop"
    tmp_ls = []
    for e in listify(example['text']):
        tmp_ls.append(re.sub(control_char_regex, '.', e))

    example['text'] = tmp_ls
    return example


def remove_remaining_control_chars(example):
    "Drop any remaining character in a Unicode 'C' (control/format) category"
    tmp_ls = []
    for e in listify(example['text']):
        tmp_ls.append(
            ''.join(ch for ch in e if unicodedata.category(ch)[0] != 'C'))

    example['text'] = tmp_ls
    return example


def remove_unicode_symbols(example):
    "Drop characters in the Unicode 'So' (Symbol, other) category, e.g. emoji"
    tmp_ls = []
    for e in listify(example['text']):
        # compare the full category: category(ch)[0] is only 'S', which never
        # equals 'So', so the truncated comparison would remove nothing
        tmp_ls.append(
            ''.join(ch for ch in e if unicodedata.category(ch) != 'So'))

    example['text'] = tmp_ls
    return example


def standardise_punc(example):
    "Map curly quotes and dashes to ASCII and blank out other exotic characters"
    transl_table = dict([(ord(x), ord(y))
                         for x, y in zip(u"‘’´“”–-",  u"'''\"\"--")])
    tmp_ls = []
    for e in listify(example['text']):
        e = e.translate(transl_table)
        # keep alphanumerics (incl. Å/Ä/Ö), common punctuation, and currency signs
        e = re.sub(r"[^a-zA-Z0-9ÖÄÅöäå .,'%&€$=*@+;<>/()!?:-]", " ", e)
        tmp_ls.append(e)

    example['text'] = tmp_ls
    return example


def remove_news_tags(example):
    "Strip SGML-style tags with uppercase names, e.g. <HEADLINE>...</HEADLINE>"
    tmp_ls = []
    for e in listify(example['text']):
        e = re.sub(r"(<[A-Z].+?>)|(</[A-Z].+?>)", "", e)
        tmp_ls.append(e)

    example['text'] = tmp_ls
    return example


def replace_urls(example):
    "Strip anchor/ref tags, then replace URLs with the filler (empty string)"
    filler, tmp_ls = '', []
    for e in listify(example['text']):
        e = re.sub(r"(<a.+?>)|(</a>)|(<ref.+?>)", "", e)
        e = re.sub(url_regex, filler, e)
        tmp_ls.append(e)

    example['text'] = tmp_ls
    return example


def replace_usernames(example):
    "Replace @user handles with the filler (empty string)"
    filler, tmp_ls = '', []
    for e in listify(example['text']):
        e = e.replace('@<user>', filler)
        # replace other user handles by filler; str.replace and re.sub are
        # global, so no per-'@'-occurrence loop is needed
        e = re.sub(username_regex, filler, e)
        if filler:
            # pad the filler with spaces; padding an empty filler would
            # insert a space between every character
            e = e.replace(filler, f' {filler} ')
        # collapse any double spaces left behind
        e = ' '.join(e.split())
        tmp_ls.append(e)

    example['text'] = tmp_ls
    return example


def remove_duplicate_words_punctuation(example):
    "Collapse immediately repeated words and runs of identical punctuation"
    punc = set(punctuation)
    tmp_ls = []
    for e in listify(example['text']):
        # 'the the the' -> 'the'
        e = re.sub(r'\b(\w+)( \1\b)+', r'\1', e)
        # '!!!' -> '!', keeping runs of non-punctuation characters intact
        newtext = []
        for k, g in groupby(e):
            if k in punc:
                newtext.append(k)
            else:
                newtext.extend(g)
        e = ''.join(newtext)
        tmp_ls.append(e)

    example['text'] = tmp_ls
    return example


def remove_multi_space(example):
    "Collapse all whitespace runs into single spaces"
    tmp_ls = []
    for e in listify(example['text']):
        tmp_ls.append(' '.join(e.split()))

    example['text'] = tmp_ls
    return example


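# ---------------------------------------------------------------------------
# Per-example statistics. Unlike the cleaners above, which accept a list of
# texts via `listify`, these index `batch['text']` as a single string, so the
# assumption is that they are applied with `datasets.map(..., batched=False)`.
# ---------------------------------------------------------------------------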
def count_alphabet(batch):
    batch['alphabet_len'] = len(re.findall(r'[äÄöÖåÅa-zA-Z]', batch['text']))
    return batch


def count_numbers(batch):
    batch['number_len'] = len(re.findall(r'[0-9]', batch['text']))
    return batch


def count_upper(batch):
    batch['upper_len'] = len(re.findall(r'[ÄÖÅA-Z]', batch['text']))
    return batch


def count_str_len(batch):
    batch['total_len'] = len(batch['text'])
    return batch


def predict_lang(batch):
    "Predict the language of a single text with fastText"
    # fastText rejects strings containing '\n'; the control-char cleaners
    # above are expected to have removed them by this point
    pred = fasttext_model.predict(batch['text'])
    batch['predicted_lang'] = pred[0][0]  # e.g. '__label__en'
    batch['predicted_lang_percentage'] = float(pred[1][0])
    return batch


def calculate_alphabet_ratio(batch):
    # guard against division by zero on empty documents
    total_len = int(batch['total_len'])
    batch['alphabet_ratio'] = (
        int(batch['alphabet_len']) / total_len if total_len else 0.0)
    return batch


def calculate_number_ratio(batch):
    total_len = int(batch['total_len'])
    batch['number_ratio'] = (
        int(batch['number_len']) / total_len if total_len else 0.0)
    return batch


def calculate_upper_ratio(batch):
    total_len = int(batch['total_len'])
    batch['upper_ratio'] = (
        int(batch['upper_len']) / total_len if total_len else 0.0)
    return batch
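

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline): how these
# callbacks might be chained with Hugging Face `datasets`. The sample texts
# are purely illustrative, and lid.176.bin must already be present for this
# module to import at all.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from datasets import Dataset

    ds = Dataset.from_dict({'text': [
        'Check out https://example.com &amp; say hi to @someuser!!!',
        'Hello   hello world,\tsee <HEADLINE>tagged</HEADLINE> text',
    ]})

    # cleaning callbacks handle lists of texts, so run them batched
    for fn in (fix_html, remove_control_char, remove_remaining_control_chars,
               remove_unicode_symbols, standardise_punc, remove_news_tags,
               replace_urls, replace_usernames,
               remove_duplicate_words_punctuation, remove_multi_space):
        ds = ds.map(fn, batched=True)

    # statistics callbacks treat 'text' as a single string, so run unbatched;
    # count_str_len must come before the ratio calculations
    for fn in (count_alphabet, count_numbers, count_upper, count_str_len,
               predict_lang, calculate_alphabet_ratio, calculate_number_ratio,
               calculate_upper_ratio):
        ds = ds.map(fn)

    print(ds[0])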