sileod committed
Commit dc9446b
1 Parent(s): c29c088

Update process_underscores.py

Files changed (1):
  process_underscores.py  +66 -57
process_underscores.py CHANGED
@@ -3,18 +3,24 @@ process_underscores.py
 
 Script to handle licensed data for which underlying text cannot be posted online (e.g. LDC data).
 Users need a copy of the LDC distribution of an underlying resource to restore text in some of the corpora.
-python process_underscores.py gum --rel_files /root/.cache/huggingface/datasets/downloads/a59f7db26e03978a86f53aa481af30f68699dde7ff4782562a18f81d33a10c63 --dep_files /root/.cache/huggingface/datasets/downloads/afbcb3a0d2a43e55d1437c7ba2843486c747c79d486de7ceac566a8e959d59e2
+
+
 """
 
 __author__ = "Amir Zeldes+Damien Sileo"
 __license__ = "Apache 2.0"
-__version__ = "2.0.0"
+__version__ = "0.0.0"
 
 import io, re, os, sys
 from glob import glob
 from collections import defaultdict
 from argparse import ArgumentParser
 
+class EDict(dict):
+    def __getattr__(self, k): return self.get(k, None)
+    def __setattr__(self, k, v): self[k] = v
+    def __delattr__(self, k): del self[k]
+
 PY3 = sys.version_info[0] == 3
 if not PY3:
     input = raw_input
@@ -542,58 +548,61 @@ def restore_docs(text_dict,dep_files=[],rel_files=[],tok_files=[]):
     sys.stderr.write("o Restored text in " + str(len(dep_files)) + " .conllu files, " + str(len(tok_files)) +
                      " .tok files and "+ str(len(rel_files)) + " .rels files\n")
 
-
-p = ArgumentParser()
-p.add_argument("corpus",action="store",choices=["rstdt","pdtb","cdtb","tdb","gum","all"],default="all",help="Name of the corpus to process or 'all'")
-p.add_argument("--rel_files", nargs='*', default=[])
-p.add_argument("--dep_files", nargs='*', default=[])
-p.add_argument("--tok_files", nargs='*', default=[])
-opts = p.parse_args()
-
-
-todo = {k:v for k,v in vars(opts).items() if 'files' in k}
-
-# Prompt user for corpus folders
-if opts.corpus == "rstdt" or opts.corpus == "all":
-    rstdt_path = input("Enter path for LDC RST-DT data/ folder:\n> ")
-    if not os.path.isdir(rstdt_path):
-        sys.stderr.write("Can't find directory at: " + rstdt_path + "\n")
-        sys.exit(0)
-    files = glob(os.sep.join([rstdt_path,"RSTtrees-WSJ-main-1.0","TRAINING","*.edus"])) + glob(os.sep.join([rstdt_path,"RSTtrees-WSJ-main-1.0","TEST","*.edus"]))
-    docs2text = harvest_text(files)
-    restore_docs(docs2text,**todo)
-if opts.corpus == "pdtb" or opts.corpus == "all":
-    pdtb_path = input("Enter path for LDC Treebank 2 raw/wsj/ folder:\n> ")
-    if not os.path.isdir(pdtb_path):
-        sys.stderr.write("Can't find directory at: " + pdtb_path + "\n")
-        sys.exit(0)
-    files = []
-    for i in range(0,25):
-        dir_name = str(i) if i > 9 else "0" + str(i)
-        files += glob(os.sep.join([pdtb_path,dir_name,"wsj_*"]))
-    docs2text = harvest_text(files)
-    restore_docs(docs2text,**todo)
-if opts.corpus == "cdtb" or opts.corpus == "all":
-    cdtb_path = input("Enter path for LDC Chinese Discourse Treebank 0.5 raw/ folder:\n> ")
-    if not os.path.isdir(cdtb_path):
-        sys.stderr.write("Can't find directory at: " + cdtb_path + "\n")
-        sys.exit(0)
-    files = glob(os.sep.join([cdtb_path,"*.raw"]))
-    docs2text = harvest_text(files)
-    restore_docs(docs2text,**todo)
-if opts.corpus == "tdb" or opts.corpus == "all":
-    tdb_path = input("Enter path for Turkish Discourse Bank 1.0 raw/01/ folder:\n> ")
-    if not os.path.isdir(tdb_path):
-        sys.stderr.write("Can't find directory at: " + tdb_path + "\n")
-        sys.exit(0)
-    files = glob(os.sep.join([tdb_path,"*.txt"]))
-    docs2text = harvest_text(files)
-    restore_docs(docs2text,**todo)
-
-if opts.corpus == "gum" or opts.corpus == "all":
-    print("Retrieving reddit data by proxy...")
-    data = get_proxy_data()
-    docs2text = get_no_space_strings(data)
-    restore_docs(docs2text,**todo)
-
-
+def run(corpus="all", rel_files=[], dep_files=[], tok_files=[],
+        rstdt_path=None, pdtb_path=None, cdtb_path=None, tdb_path=None):
+
+    opts = EDict(corpus=corpus,
+                 rel_files=rel_files,
+                 dep_files=dep_files,
+                 tok_files=tok_files)
+    todo = {k: v for k, v in opts.items() if 'files' in k}
+
+    if opts.corpus == "rstdt" or opts.corpus == "all":
+        if rstdt_path is None:
+            raise ValueError("rstdt_path is required for corpus rstdt")
+        if not os.path.isdir(rstdt_path):
+            sys.stderr.write("Can't find directory at: " + rstdt_path + "\n")
+            sys.exit(0)
+        files = glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TRAINING", "*.edus"])) + \
+                glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TEST", "*.edus"]))
+        docs2text = harvest_text(files)
+        restore_docs(docs2text, **todo)
+
+    if opts.corpus == "pdtb" or opts.corpus == "all":
+        if pdtb_path is None:
+            raise ValueError("pdtb_path is required for corpus pdtb")
+        if not os.path.isdir(pdtb_path):
+            sys.stderr.write("Can't find directory at: " + pdtb_path + "\n")
+            sys.exit(0)
+        files = []
+        for i in range(0, 25):
+            dir_name = str(i) if i > 9 else "0" + str(i)
+            files += glob(os.sep.join([pdtb_path, dir_name, "wsj_*"]))
+        docs2text = harvest_text(files)
+        restore_docs(docs2text, **todo)
+
+    if opts.corpus == "cdtb" or opts.corpus == "all":
+        if cdtb_path is None:
+            raise ValueError("cdtb_path is required for corpus cdtb")
+        if not os.path.isdir(cdtb_path):
+            sys.stderr.write("Can't find directory at: " + cdtb_path + "\n")
+            sys.exit(0)
+        files = glob(os.sep.join([cdtb_path, "*.raw"]))
+        docs2text = harvest_text(files)
+        restore_docs(docs2text, **todo)
+
+    if opts.corpus == "tdb" or opts.corpus == "all":
+        if tdb_path is None:
+            raise ValueError("tdb_path is required for corpus tdb")
+        if not os.path.isdir(tdb_path):
+            sys.stderr.write("Can't find directory at: " + tdb_path + "\n")
+            sys.exit(0)
+        files = glob(os.sep.join([tdb_path, "*.txt"]))
+        docs2text = harvest_text(files)
+        restore_docs(docs2text, **todo)
+
+    if opts.corpus == "gum" or opts.corpus == "all":
+        print("Retrieving reddit data by proxy...")
+        data = get_proxy_data()
+        docs2text = get_no_space_strings(data)
+        restore_docs(docs2text, **todo)
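
For reference, a minimal sketch of calling the new run() entry point that replaces the removed argparse block, assuming the script is importable as process_underscores; the import path, the example .rels/.conllu file names, and the LDC folder below are placeholder assumptions, not values from this repository.

# Usage sketch only -- all paths and file names here are hypothetical placeholders.
from process_underscores import run  # assumes the script can be imported as a module

# GUM: no LDC folder is needed; the reddit text is fetched by proxy inside run()
run(corpus="gum",
    rel_files=["downloads/eng.rst.gum_test.rels"],      # hypothetical cached .rels file
    dep_files=["downloads/eng.rst.gum_test.conllu"])    # hypothetical cached .conllu file

# RST-DT: the matching LDC folder must now be passed explicitly, since run()
# raises ValueError instead of prompting on stdin like the removed CLI flow did
run(corpus="rstdt",
    dep_files=["downloads/eng.rst.rstdt_test.conllu"],  # hypothetical cached .conllu file
    rstdt_path="/data/LDC/rst_dt/data")                 # hypothetical LDC RST-DT data/ folder

Passing the corpus folders as keyword arguments keeps the restore step non-interactive, which matches the dataset-loading context this commit targets.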