sileod committed
Commit aa8ae86
1 Parent(s): 37ca756

Update process_underscores.py

Files changed (1)
  1. process_underscores.py +502 -502
process_underscores.py CHANGED
@@ -23,531 +23,531 @@ class EDict(dict):
PY3 = sys.version_info[0] == 3
if not PY3:
    input = raw_input

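# Reddit posts and comments underlying each GUM reddit document; the "id" values
# are used below to look up raw text in a fetched cache (see get_no_space_strings).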
gum_docs = {
    "GUM_reddit_macroeconomics": [
        {"year": "2017", "month": "09", "id": "6zm74h", "type": "post", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmwwqlt", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_stroke": [
        {"year": "2017", "month": "08", "id": "6ws3eh", "type": "post", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaei1x", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaiwsm", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmkx8bk", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmm1327", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaoodn", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_polygraph": [
        {"year": "2014", "month": "12", "id": "2q6qnv", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_ring": [
        {"year": "2016", "month": "09", "id": "5570x1", "type": "post", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d885ma0", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d8880w7", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88u7dg", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88unu3", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88v0sz", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88xaqu", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d893mj9", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88s4bb", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d88zt6x", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_space": [
        {"year": "2016", "month": "08", "id": "50hx5c", "type": "post", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d7471k5", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74i5ka", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74ppi0", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_superman": [
        # {"year": "2017", "month": "04", "id": "68e0u3", "type": "post", "title_only": True},  # Post title not included in this document
        {"year": "2017", "month": "05", "id": "dgys1z8", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_bobby": [
        {"year": "2018", "month": "06", "id": "8ph56q", "type": "post", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0b8zz4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dwqlg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e15pcqu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dz1mp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e1uuo9e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0brc9w", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0bz951", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_escape": [
        {"year": "2017", "month": "05", "id": "69r98j", "type": "post", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh96n8v", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9enpe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dht8oyn", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhn0hoe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk9ted1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh98kcg", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9zxej", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9x7j9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9xsrt", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "din85zf", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinab0w", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinaggd", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinbyb9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dj65sp1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dizdd8a", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk78qw8", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dm0gqc7", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "10", "id": "domd1r0", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9irie", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9iw36", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "djlcwu5", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dlzcxpy", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhabstb", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhbr3m6", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "diz97qy", "type": "comment"}
    ],
    "GUM_reddit_gender": [
        {"year": "2018", "month": "09", "id": "9e5urs", "type": "post", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5mg3s7", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mkpok", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nxbmb", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nzg9j", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mh94v", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mmenp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ms5u3", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_monsters": [
        {"year": "2018", "month": "09", "id": "9eci2u", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ox2jr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5p3gtl", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pnfro", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5q08o4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pney1", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_pandas": [
        {"year": "2018", "month": "09", "id": "9e3s9h", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwy6n", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m397o", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3xgb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3z2e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwbbt", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m38sr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m42cu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvlxm", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvqay", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lw5t6", "type": "comment", "source": "undef"},  # Blowhole
        {"year": "2018", "month": "09", "id": "e5lwz31", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lxi0s", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwxqq", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lzv1b", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m48ag", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1yqe", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lx0sw", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2n80", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2wrh", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3blb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvxoc", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1abg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1w5i", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3pdi", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3ruf", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m4yu2", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m5bcb", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_steak": [
        {"year": "2015", "month": "08", "id": "3im341", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_card": [
        {"year": "2019", "month": "08", "id": "cmqrwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew3zrqg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43d2c", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43oks", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43ymc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46h1p", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46oly", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46wq7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew470zc", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_callout": [
        {"year": "2019", "month": "09", "id": "d1eg3u", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkucpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkv0cc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwbx9", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlh2o6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlkajf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlnco2", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezo20yy", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwcvh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl07dm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezmajm7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl1wz3", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_conspiracy": [
        {"year": "2019", "month": "02", "id": "aumhwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9rt0n", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9tvyw", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehc0l2q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehclwtv", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9jo5x", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehr2665", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha3c1q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha5jlq", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_introverts": [
        {"year": "2019", "month": "06", "id": "by820m", "type": "post", "source": "undef", "title_double": True},  # Possible title was repeated by annotator
        {"year": "2019", "month": "06", "id": "eqeik8m", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfgaeu", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfplpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqg6a5u", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqh6j29", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhjtwr", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqi2jl3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqii2kf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhlj8j", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_racial": [
        {"year": "2019", "month": "09", "id": "d1urjk", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq9y6w", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezqpqmm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq8xs7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezr55wk", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_social": [
        {"year": "2019", "month": "09", "id": "d1qy3g", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpb3jg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpdmy3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpjor8", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezpiozm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc1ps", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezp9fbh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezqrumb", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpe0e6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpf71f", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezt7qlf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc4jj", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpa2e4", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpfzql", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpi39v", "type": "comment", "source": "undef"},
    ]
}


def underscore_files(filenames):
    def underscore_rel_field(text):
        blanked = []
        text = text.replace("<*>", "❤")
        for c in text:
            if c != "❤" and c != " ":
                blanked.append("_")
            else:
                blanked.append(c)
        return "".join(blanked).replace("❤", "<*>")

    for f_path in filenames:
        skiplen = 0
        with io.open(f_path, 'r', encoding='utf8') as fin:
            lines = fin.readlines()

        with io.open(f_path, 'w', encoding='utf8', newline="\n") as fout:
            output = []
            if f_path.endswith(".rels"):
                for l, line in enumerate(lines):
                    line = line.strip()
                    if "\t" in line and l > 0:
                        doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                        if "GUM" in doc and "reddit" not in doc:
                            output.append(line)
                            continue
                        unit1_txt = underscore_rel_field(unit1_txt)
                        unit2_txt = underscore_rel_field(unit2_txt)
                        unit1_sent = underscore_rel_field(unit1_sent)
                        unit2_sent = underscore_rel_field(unit2_sent)
                        fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                        line = "\t".join(fields)
                    output.append(line)
            else:
                doc = ""
                for line in lines:
                    line = line.strip()
                    if line.startswith("# newdoc_id"):
                        doc = line.split("=", maxsplit=1)[1].strip()
                    if "GUM" in doc and "reddit" not in doc:
                        output.append(line)
                        continue
                    if line.startswith("# text"):
                        m = re.match(r'(# text ?= ?)(.+)', line)
                        if m is not None:
                            line = m.group(1) + re.sub(r'[^\s]', '_', m.group(2))
                        output.append(line)
                    elif "\t" in line:
                        fields = line.split("\t")
                        tok_col, lemma_col = fields[1:3]
                        if lemma_col == tok_col:  # Delete lemma if identical to token
                            fields[2] = '_'
                        elif tok_col.lower() == lemma_col:
                            fields[2] = "*LOWER*"
                        if skiplen < 1:
                            fields[1] = len(tok_col) * '_'
                        else:
                            skiplen -= 1
                        output.append("\t".join(fields))
                        if "-" in fields[0]:  # Multitoken
                            start, end = fields[0].split("-")
                            start = int(start)
                            end = int(end)
                            skiplen = end - start + 1
                    else:
                        output.append(line)
            fout.write('\n'.join(output) + "\n")
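
# Usage sketch (hypothetical file names, following the shared task layout of
# .rels and .conllu files):
#   underscore_files(["eng.rst.gum_dev.rels", "eng.rst.gum_dev.conllu"])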
 

def get_no_space_strings(cache_dict):
    import ast

    no_space_docs = defaultdict(str)

    for doc in gum_docs:
        for post in gum_docs[doc]:
            if post["id"] in cache_dict:
                json_result = cache_dict[post["id"]]
                parsed = ast.literal_eval(json_result)[0]
                if post["type"] == "post":
                    plain = parsed["selftext"]
                    title = parsed["title"]
                    if "title_only" in post:
                        if post["title_only"]:
                            plain = ""
                    if "title_double" in post:
                        title = title + " " + title
                else:
                    plain = parsed["body"]
                    title = ""
                if "_space" in doc:
                    plain = plain.replace("&gt;", "")  # GUM_reddit_space has formatting &gt; to indicate indented block quotes
                elif "_gender" in doc:
                    plain = plain.replace("- The vast", "The vast")
                    plain = plain.replace("- Society already accommodates", "Society already accommodates")
                    plain = plain.replace("- Society recognizes disabilities", "Society recognizes disabilities")
                    plain = plain.replace("- It’s a waste of time", "It’s a waste of time")
                    plain = plain.replace("PB&amp;J", "PB&J")
                elif "_monsters" in doc:
                    plain = plain.replace("1. He refers to", "a. He refers to")
                    plain = plain.replace("2. Using these", "b. Using these")
                    plain = plain.replace("3. And he has", "c. And he has")
                    plain = plain.replace("&#x200B; &#x200B;", "")
                    plain = re.sub(r' [0-9]+\. ', ' ', plain)
                elif "_ring" in doc:
                    plain = plain.replace("&gt;", ">")
                elif "_escape" in doc:
                    plain = plain.replace("*1 year later*", "1 year later")
                elif "_racial" in doc:
                    plain = plain.replace("> ", "")
                elif "_callout" in doc:
                    plain = plain.replace("_it", "it").replace("well?_", "well?").replace(">certain", "certain")
                elif "_conspiracy" in doc:
                    plain = plain.replace(">", "")
                elif "_stroke" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_bobby" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_introvert" in doc:
                    plain = plain.replace("enjoy working out.", "enjoy working out").replace("~~", "")
                elif "_social" in doc:
                    plain = plain.replace("the purpose", "those purpose").replace("&#x200B;", "")
                no_space = re.sub(r"\s", "", plain).replace("*", "")
                no_space = re.sub(r'\[([^]]+)\]\([^)]+\)', r'\1', no_space)  # Remove Wiki style links: [text](URL)
                if no_space_docs[doc] == "":
                    no_space_docs[doc] += re.sub(r"\s", "", title).replace("*", "")
                no_space_docs[doc] += no_space

    return no_space_docs
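
# Usage sketch (hypothetical cache; values are stringified JSON lists, as
# consumed by ast.literal_eval above):
#   cache = {"6zm74h": '[{"selftext": "...", "title": "..."}]'}
#   no_space = get_no_space_strings(cache)  # {"GUM_reddit_macroeconomics": "..."}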
 

def harvest_text(files):
    """
    :param files: LDC files containing raw text data
    :return: Dictionary of document base names (e.g. wsj_0013) to string of non-whitespace characters in the document
    """

    docs = {}

    for file_ in files:
        docname = os.path.basename(file_)
        if "." in docname:
            docname = docname.split(".")[0]
        try:
            text = io.open(file_, encoding="utf8").read()
        except UnicodeDecodeError:
            text = io.open(file_, encoding="Latin1").read()  # e.g. wsj_0142
        text = text.replace(".START", "")  # Remove PDTB .START codes
        text = re.sub(r'\s', '', text)  # Remove all whitespace
        docs[docname] = text

    return docs
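
# Usage sketch (hypothetical LDC paths):
#   docs = harvest_text(glob.glob("ldc/treebank/raw/wsj_*"))
#   # docs["wsj_0013"] -> all non-whitespace characters of that document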
 

def get_proxy_data():
    import requests
    out_posts = {}
    tab_delim = requests.get("https://corpling.uis.georgetown.edu/gum/fetch_text_proxy.py").text
    for line in tab_delim.split("\n"):
        if "\t" in line:
            post, text = line.split("\t")
            out_posts[post] = text
    return out_posts
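
# Usage sketch: fetches tab-delimited id/text pairs from the endpoint above
# (network access required):
#   proxy_texts = get_proxy_data()  # maps post or comment id -> cached text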
 

def restore_docs(text_dict, dep_files=[], rel_files=[], tok_files=[]):
    def restore_range(range_string, underscored, tid_dict):
        output = []
        tok_ids = []
        range_strings = range_string.split(",")
        for r in range_strings:
            if "-" in r:
                s, e = r.split("-")
                tok_ids += list(range(int(s), int(e) + 1))
            else:
                tok_ids.append(int(r))

        for tok in underscored.split():
            if tok == "<*>":
                output.append(tok)
            else:
                tid = tok_ids.pop(0)
                output.append(tid_dict[tid])
        return " ".join(output)

    skiplen = 0
    token_dict = {}
    tid2string = defaultdict(dict)
    for file_ in dep_files + tok_files + rel_files:
        lines = io.open(file_, encoding="utf8").readlines()
        underscore_len = 0  # Must match doc_len at end of file processing
        doc_len = 0
        if file_.endswith(".rels") or file_ in rel_files:
            output = []
            violation_rows = []
            for l, line in enumerate(lines):
                line = line.strip()
                if l > 0 and "\t" in line:
                    fields = line.split("\t")
                    docname = fields[0]
                    text = text_dict[docname]
                    if "GUM_" in docname and "reddit" not in docname:  # Only Reddit documents need reconstruction in GUM
                        output.append(line)
                        continue
                    doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                    underscore_len += unit1_txt.count("_") + unit2_txt.count("_") + unit1_sent.count("_") + unit2_sent.count("_")
                    if underscore_len == 0:
                        continue
                        #sys.stderr.write("! Non-underscored file detected - " + os.path.basename(file_) + "\n")
                        #sys.exit(0)
                    unit1_txt = restore_range(unit1_toks, unit1_txt, tid2string[docname])
                    unit2_txt = restore_range(unit2_toks, unit2_txt, tid2string[docname])
                    unit1_sent = restore_range(s1_toks, unit1_sent, tid2string[docname])
                    unit2_sent = restore_range(s2_toks, unit2_sent, tid2string[docname])
                    plain = unit1_txt + unit2_txt + unit1_sent + unit2_sent
                    plain = plain.replace("<*>", "").replace(" ", "")
                    doc_len += len(plain)
                    fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                    line = "\t".join(fields)
                    if doc_len != underscore_len and len(violation_rows) == 0:
                        violation_rows.append(str(l) + ": " + line)
                output.append(line)

        else:
            tokfile = ".tok" in file_
            output = []
            parse_text = ""
            docname = ""
            for line in lines:
                line = line.strip()
                if "# newdoc_id " in line:
                    tid = 0
                    if parse_text != "":
                        if not tokfile:
                            token_dict[docname] = parse_text
                    parse_text = ""
                    docname = re.search(r'# newdoc_id ?= ?([^\s]+)', line).group(1)
                    if "GUM" in docname and "reddit" not in docname:
                        output.append(line)
                        continue
                    if docname not in text_dict:
                        raise IOError("! Text for document name " + docname + " not found.\n Please check that your LDC data contains the file for this document.\n")
                    if ".tok" in file_:
                        text = token_dict[docname]
                    else:
                        text = text_dict[docname]
                    doc_len = len(text)
                    underscore_len = 0

                if "GUM" in docname and "reddit" not in docname:
                    output.append(line)
                    continue

                if line.startswith("# text"):
                    m = re.match(r'(# ?text ?= ?)(.+)', line)
                    if m is not None:
                        i = 0
                        sent_text = ""
                        for char in m.group(2).strip():
                            if char != " ":
                                sent_text += text[i]
                                i += 1
                            else:
                                sent_text += " "
                        line = m.group(1) + sent_text
                    output.append(line)
                elif "\t" in line:
                    fields = line.split("\t")
                    if skiplen < 1:
                        underscore_len += len(fields[1])
                        fields[1] = text[:len(fields[1])]
                    if "-" not in fields[0] and "." not in fields[0]:
                        parse_text += fields[1]
                        tid += 1
                        tid2string[docname][tid] = fields[1]
                    if not tokfile:
                        if fields[2] == '_' and "-" not in fields[0] and "." not in fields[0]:
                            fields[2] = fields[1]
                        elif fields[2] == "*LOWER*":
                            fields[2] = fields[1].lower()
                    if skiplen < 1:
                        text = text[len(fields[1]):]
                    else:
                        skiplen -= 1
                    output.append("\t".join(fields))
                    if "-" in fields[0]:  # Multitoken
                        start, end = fields[0].split("-")
                        start = int(start)
                        end = int(end)
                        skiplen = end - start + 1
                else:
                    output.append(line)

        if doc_len != underscore_len:
            if ".rels" in file_:
                sys.stderr.write(
                    "\n! Tried to restore file " + os.path.basename(file_) + " but source text has different length than tokens in shared task file:\n" +
                    " Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                    " Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n" +
                    " Violation row: " + violation_rows[0])
            else:
                sys.stderr.write("\n! Tried to restore document " + docname + " but source text has different length than tokens in shared task file:\n" +
                                 " Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                                 " Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n")
                with io.open("debug.txt", 'w', encoding="utf8") as f:
                    f.write(text_dict[docname])
                    f.write("\n\n\n")
                    f.write(parse_text)
            sys.exit(0)

        if not tokfile and parse_text != "":
            token_dict[docname] = parse_text

        with io.open(file_, 'w', encoding='utf8', newline="\n") as fout:
            fout.write("\n".join(output) + "\n")

    sys.stderr.write("o Restored text in " + str(len(dep_files)) + " .conllu files, " + str(len(tok_files)) +
                     " .tok files and " + str(len(rel_files)) + " .rels files\n")
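
# Usage sketch (hypothetical file lists; text_dict maps document names to their
# non-whitespace source characters, e.g. from harvest_text or get_no_space_strings):
#   restore_docs(harvest_text(ldc_files),
#                dep_files=["eng.pdtb.pdtb_dev.conllu"],
#                rel_files=["eng.pdtb.pdtb_dev.rels"])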
 
def run(corpus="all", rel_files=[], dep_files=[], tok_files=[],
        rstdt_path=None, pdtb_path=None, cdtb_path=None, tdb_path=None):
 