Matthew Franglen committed
Commit cdea156
1 Parent(s): cfa80ae

Drop the old convert code

That code was from the blog and was still a bit of a work in progress.

Files changed (1)
  1. src/convert.py +0 -276
src/convert.py DELETED
@@ -1,276 +0,0 @@
-from typing import Optional
-
-import Levenshtein
-import pandas as pd
-
-from .data import read_aste_file, read_sem_eval_file
-from .types import CharacterIndices, WordSpan
-
-
-def get_original_text(
-    aste_file: str,
-    sem_eval_file: str,
-    debug: bool = False,
-) -> pd.DataFrame:
-    approximate_matches = 0
-
-    def best_match(text: str) -> str:
-        # match on the whitespace-stripped text; if that fails, fall back
-        # to the closest SemEval sentence by Levenshtein distance
-        comparison = text.replace(" ", "")
-        if comparison in comparison_to_text:
-            return comparison_to_text[comparison]
-
-        nonlocal approximate_matches
-        approximate_matches += 1
-        distances = sem_eval_comparison.apply(
-            lambda se_comparison: Levenshtein.distance(comparison, se_comparison)
-        )
-        best = sem_eval_df.iloc[distances.argmin()].text
-        return best
-
-    sem_eval_df = read_sem_eval_file(sem_eval_file)
-    sem_eval_comparison = sem_eval_df.text.str.replace(" ", "")
-    comparison_to_text = dict(zip(sem_eval_comparison, sem_eval_df.text))
-
-    aste_df = read_aste_file(aste_file)
-    aste_df = aste_df.rename(columns={"text": "preprocessed_text"})
-    aste_df["text"] = aste_df.preprocessed_text.apply(best_match)
-    if debug:
-        print(f"Read {len(aste_df):,} rows")
-        print(f"Had to use {approximate_matches:,} approximate matches")
-    return aste_df[["text", "preprocessed_text", "triples"]]
-
-
-def edit(original: str, preprocessed: str) -> list[Optional[int]]:
-    # map each character of the original text to its index in the
-    # preprocessed text, None where no reliable mapping exists
-    indices: list[Optional[int]] = list(range(len(preprocessed)))
-    for operation, _source_position, destination_position in Levenshtein.editops(
-        preprocessed, original
-    ):
-        if operation == "replace":
-            indices[destination_position] = None
-        elif operation == "insert":
-            indices.insert(destination_position, None)
-        elif operation == "delete":
-            del indices[destination_position]
-    return indices
-
-
-def has_unmapped(indices: list[Optional[int]]) -> bool:
-    return any(index is None for index in indices)
-
-
-def has_unmapped_non_space(row: pd.Series) -> bool:
-    letter_and_index: list[tuple[str, Optional[int]]] = list(
-        zip(row.text, row.text_indices)
-    )
-    return any(index is None for letter, index in letter_and_index if letter != " ")
-
-
-def row_to_character_indices(row: pd.Series) -> pd.Series:
-    try:
-        return pd.Series(
-            to_character_indices(
-                triplet=row.triples,
-                preprocessed=row.preprocessed_text,
-                text=row.text,
-                text_indices=row.text_indices,
-            )
-        )
-    except Exception:
-        print(f"failed to process row {row.name}")
-        print(row)
-        raise
-
-
-def to_character_indices(
-    *,
-    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
-    preprocessed: str,
-    text: str,
-    text_indices: list[Optional[int]],
-) -> CharacterIndices:
-    def find_start_index(span: WordSpan) -> int:
-        # the starting letter in the lookup can be missing or None,
-        # which would cause a lookup failure;
-        # to recover from this, find the following letter index and backtrack
-        for index in range(span.start_index, span.end_index):
-            try:
-                text_index = text_indices.index(index)
-                for _ in range(index - span.start_index):
-                    if text_index - 1 < 0:
-                        break
-                    if text_indices[text_index - 1] is not None:
-                        break
-                    text_index -= 1
-                return text_index
-            except ValueError:
-                pass
-        # not present in list
-        raise ValueError(f"cannot find any part of {span}")
-
-    def find_end_index(span: WordSpan) -> int:
-        # the ending letter in the lookup can be missing or None,
-        # which would cause a lookup failure;
-        # to recover from this, find the preceding letter index and advance
-        for index in range(span.end_index - 1, span.start_index - 1, -1):
-            try:
-                text_index = text_indices.index(index)
-                for _ in range(span.end_index - index):
-                    if text_index + 1 >= len(text_indices):
-                        break
-                    if text_indices[text_index + 1] is not None:
-                        break
-                    text_index += 1
-                return text_index
-            except ValueError:
-                pass
-        # not present in list
-        raise ValueError(f"cannot find any part of {span}")
-
-    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
-        word_start = span[0]
-        word_start_span = word_indices[word_start]
-
-        word_end = span[-1]
-        word_end_span = word_indices[word_end]
-
-        start_index = find_start_index(word_start_span)
-        end_index = find_end_index(word_end_span)
-        return start_index, end_index
-
-    aspect_span, opinion_span, sentiment = triplet
-    assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
-    assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
-    assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
-
-    word_indices = WordSpan.to_spans(preprocessed)
-
-    aspect_start_index, aspect_end_index = to_indices(aspect_span)
-    aspect_term = text[aspect_start_index : aspect_end_index + 1]
-    opinion_start_index, opinion_end_index = to_indices(opinion_span)
-    opinion_term = text[opinion_start_index : opinion_end_index + 1]
-
-    nice_sentiment = {
-        "POS": "positive",
-        "NEG": "negative",
-        "NEU": "neutral",
-    }[sentiment]
-
-    return CharacterIndices(
-        aspect_start_index=aspect_start_index,
-        aspect_end_index=aspect_end_index,
-        aspect_term=aspect_term,
-        opinion_start_index=opinion_start_index,
-        opinion_end_index=opinion_end_index,
-        opinion_term=opinion_term,
-        sentiment=nice_sentiment,
-    )
-
-
-def convert_sem_eval_text(
-    aste_file: str,
-    sem_eval_file: str,
-    debug: bool = False,
-) -> pd.DataFrame:
-    df = get_original_text(
-        aste_file=aste_file,
-        sem_eval_file=sem_eval_file,
-        debug=debug,
-    )
-    df = df.explode("triples")
-    df = df.reset_index(drop=False)
-    df["text_indices"] = df.apply(
-        lambda row: edit(original=row.text, preprocessed=row.preprocessed_text),
-        axis="columns",
-    )
-    df = df.merge(
-        df.apply(row_to_character_indices, axis="columns"),
-        left_index=True,
-        right_index=True,
-    )
-    df = df.drop(columns=["preprocessed_text", "triples", "text_indices"])
-    return df
-
-
-def convert_aste_text(aste_file: str) -> pd.DataFrame:
-    df = read_aste_file(aste_file)
-    df = df.explode("triples")
-    df = df.reset_index(drop=False)
-    df = df.merge(
-        df.apply(aste_row_to_character_indices, axis="columns"),
-        left_index=True,
-        right_index=True,
-    )
-    df = df.drop(columns=["triples"])
-    return df
-
-
-def aste_row_to_character_indices(row: pd.Series) -> pd.Series:
-    try:
-        return pd.Series(
-            aste_to_character_indices(
-                triplet=row.triples,
-                text=row.text,
-            )
-        )
-    except Exception:
-        print(f"failed to process row {row.name}")
-        print(row)
-        raise
-
-
-def is_sequential(span: tuple[int, ...]) -> bool:
-    return all(span[index + 1] - span[index] == 1 for index in range(len(span) - 1))
-
-
-def aste_to_character_indices(
-    *,
-    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
-    text: str,
-) -> CharacterIndices:
-    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
-        word_start = span[0]
-        word_start_span = word_indices[word_start]
-
-        word_end = span[-1]
-        word_end_span = word_indices[word_end]
-
-        return word_start_span.start_index, word_end_span.end_index - 1
-
-    aspect_span, opinion_span, sentiment = triplet
-    assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
-    assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
-    assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
-
-    word_indices = WordSpan.to_spans(text)
-
-    aspect_start_index, aspect_end_index = to_indices(aspect_span)
-    aspect_term = text[aspect_start_index : aspect_end_index + 1]
-    opinion_start_index, opinion_end_index = to_indices(opinion_span)
-    opinion_term = text[opinion_start_index : opinion_end_index + 1]
-
-    nice_sentiment = {
-        "POS": "positive",
-        "NEG": "negative",
-        "NEU": "neutral",
-    }[sentiment]
-
-    return CharacterIndices(
-        aspect_start_index=aspect_start_index,
-        aspect_end_index=aspect_end_index,
-        aspect_term=aspect_term,
-        opinion_start_index=opinion_start_index,
-        opinion_end_index=opinion_end_index,
-        opinion_term=opinion_term,
-        sentiment=nice_sentiment,
-    )
-
-
-label_to_sentiment = {
-    "POS": "positive",
-    "NEG": "negative",
-    "NEU": "neutral",
-}
-
-
-def to_nice_sentiment(label: str) -> str:
-    return label_to_sentiment[label]
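
For reference, the entry points of the removed module were convert_aste_text and convert_sem_eval_text. Against the pre-removal revision they would have been invoked roughly like this (the import root and file paths are hypothetical):

from src.convert import convert_aste_text, convert_sem_eval_text

# character-level spans computed directly from the ASTE word indices
df = convert_aste_text("data/aste/train_triplets.txt")

# ASTE triples re-aligned against the original (unpreprocessed) SemEval text
df = convert_sem_eval_text(
    aste_file="data/aste/train_triplets.txt",
    sem_eval_file="data/sem_eval/train.xml",
    debug=True,
)

Each call returns a DataFrame with one row per triple, carrying the aspect and opinion character spans plus the spelled-out sentiment.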
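
The trickiest part of the file is edit, which replays the operations from Levenshtein.editops over a list of character positions, so every character of the original text can be traced back to its position in the preprocessed text. A minimal self-contained sketch of that trick (the function name and example strings are made up for illustration):

from typing import Optional

import Levenshtein


def map_indices(original: str, preprocessed: str) -> list[Optional[int]]:
    # start from the identity mapping over the preprocessed text, then
    # replay the edits that turn the preprocessed text into the original
    indices: list[Optional[int]] = list(range(len(preprocessed)))
    for operation, _source, destination in Levenshtein.editops(preprocessed, original):
        if operation == "replace":
            indices[destination] = None  # character changed: mapping lost
        elif operation == "insert":
            indices.insert(destination, None)  # only in the original text
        elif operation == "delete":
            del indices[destination]  # only in the preprocessed text
    return indices


# one entry per character of the original, giving its position in the
# preprocessed text; the tokenizer-inserted space at position 2 is skipped
print(map_indices("don't", "do n't"))  # [0, 1, 3, 4, 5]

With the None markers in place, helpers such as has_unmapped_non_space can tell whether an approximate match garbled anything beyond whitespace.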
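
The word-to-character conversion leans on WordSpan.to_spans from src/types.py, which this diff does not show. Judging only by how the removed code uses it (start_index inclusive, end_index exclusive, one span per word), a stand-in could look like the following; treat it as an assumption rather than the real definition:

from dataclasses import dataclass


# hypothetical stand-in for src/types.WordSpan, inferred from usage
@dataclass
class WordSpan:
    start_index: int  # first character of the word
    end_index: int  # one past the last character of the word

    @staticmethod
    def to_spans(text: str) -> list["WordSpan"]:
        spans = []
        position = 0
        for word in text.split(" "):
            if word:
                spans.append(WordSpan(position, position + len(word)))
            position += len(word) + 1
        return spans


# word 1 of "great food" covers characters 6 to 9 inclusive, which matches
# the end_index - 1 adjustment made in aste_to_character_indices
spans = WordSpan.to_spans("great food")
print(spans[1].start_index, spans[1].end_index - 1)  # 6 9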