anishka committed on
Commit
627b999
1 Parent(s): 9122c6a

Upload CodeSwitching-TE-EN.py

Browse files
Files changed (1) hide show
  1. CodeSwitching-TE-EN.py +48 -54
CodeSwitching-TE-EN.py CHANGED
@@ -1,16 +1,24 @@
1
- # Loading script for the Telugu-English Codeswitch Transliterate dataset
2
  import datasets
3
- from conllu import parse_incr
4
- import conllu
5
 
6
  logger = datasets.logging.get_logger(__name__)
7
 
8
  _CITATION = """ """
9
 
10
- _DESCRIPTION = """Telugu English POS Codeswitch dataset.
 
 
 
 
 
 
 
 
 
 
11
  """
12
 
13
- _HOMEPAGE = ""
14
 
15
  _URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/resolve/main/"
16
  _TRAINING_FILE = "te_mtg-ud-train.conllu"
@@ -18,25 +26,25 @@ _DEV_FILE = "te_mtg-ud-dev.conllu"
18
  _TEST_FILE = "te_mtg-ud-test.conllu"
19
 
20
 
21
class TeEnCodeSwitchConfig(datasets.BuilderConfig):
    """Builder config for the Telugu-English code-switch dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for TeEnCodeSwitch.

        Args:
            **kwargs: keyword arguments forwarded to the parent
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
30
 
31
 
32
- class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
33
- """ Te-En-CodeSwitch dataset."""
34
 
35
  BUILDER_CONFIGS = [
36
- TeEnCodeSwitchConfig(
37
- name="Te-En-CodeSwitch",
38
- version=datasets.Version("0.0.1"),
39
- description="Te-En-CodeSwitch dataset"
40
  ),
41
  ]
42
 
@@ -71,12 +79,6 @@ class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
71
  ]
72
  )
73
  ),
74
- "xpos": datasets.Sequence(datasets.Value("string")),
75
- "feats": datasets.Sequence(datasets.Value("string")),
76
- "head": datasets.Sequence(datasets.Value("string")),
77
- "deprel": datasets.Sequence(datasets.Value("string")),
78
- "deps": datasets.Sequence(datasets.Value("string")),
79
- "misc": datasets.Sequence(datasets.Value("string")),
80
  }
81
  ),
82
  supervised_keys=None,
@@ -92,8 +94,6 @@ class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
92
  "test": f"{_URL}{_TEST_FILE}",
93
  }
94
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
95
- print ("Downloading files: ")
96
- print (urls_to_download)
97
 
98
  return [
99
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
@@ -102,36 +102,30 @@ class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
102
  ]
103
 
104
  def _generate_examples(self, filepath):
105
-
106
- sentence = []
107
- with open(filepath, 'r') as file:
108
- for line in file:
109
- line = line.strip()
110
- # Skip comment lines and empty lines
111
- if line.startswith('#') or not line:
112
- continue
113
- if line == '':
114
- # If an empty line is encountered, yield the current sentence
115
- yield sentence
116
- # Reset the sentence for the next iteration
117
- sentence = []
 
 
 
118
  else:
119
- # Split the line by tabs to get fields
120
- fields = line.split('\t')
121
- # Add the fields to the sentence as a dictionary
122
- sentence.append({
123
- 'id': fields[0],
124
- 'form': fields[1],
125
- 'lemma': fields[2],
126
- 'upos': fields[3],
127
- 'xpos': fields[4],
128
- 'feats': fields[5],
129
- 'head': fields[6],
130
- 'deprel': fields[7],
131
- 'deps': fields[8],
132
- 'misc': fields[9]
133
- })
134
-
135
- # Yield the last sentence if there is one
136
- if sentence:
137
- yield sentence
 
1
+ # Loading script for the Ancora NER dataset.
2
  import datasets
 
 
3
 
4
  logger = datasets.logging.get_logger(__name__)
5
 
6
  _CITATION = """ """
7
 
8
+ _DESCRIPTION = """AnCora Catalan NER.
9
+ This is a dataset for Named Entity Recognition (NER) from the AnCora corpus adapted for
10
+ Machine Learning and Language Model evaluation purposes.
11
+ Since multiwords (including Named Entities) in the original AnCora corpus are aggregated as
12
+ a single lexical item using underscores (e.g. "Ajuntament_de_Barcelona")
13
+ we split them to align with word-per-line format, and added conventional Begin-Inside-Outside (IOB)
14
+ tags to mark and classify Named Entities.
15
+ We did not filter out the different categories of NEs from Ancora (weak and strong).
16
+ We did 6 minor edits by hand.
17
+ AnCora corpus is used under [CC-by] (https://creativecommons.org/licenses/by/4.0/) licence.
18
+ This dataset was developed by BSC TeMU as part of the AINA project, and to enrich the Catalan Language Understanding Benchmark (CLUB).
19
  """
20
 
21
+ _HOMEPAGE = """https://zenodo.org/record/4762031"""
22
 
23
  _URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/resolve/main/"
24
  _TRAINING_FILE = "te_mtg-ud-train.conllu"
 
26
  _TEST_FILE = "te_mtg-ud-test.conllu"
27
 
28
 
29
class AncoraCaNerConfig(datasets.BuilderConfig):
    """Builder config for the Ancora Ca NER dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for AncoraCaNer.

        Args:
            **kwargs: keyword arguments forwarded to the parent
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
38
 
39
 
40
+ class AncoraCaNer(datasets.GeneratorBasedBuilder):
41
+ """ AncoraCaNer dataset."""
42
 
43
  BUILDER_CONFIGS = [
44
+ AncoraCaNerConfig(
45
+ name="AncoraCaNer",
46
+ version=datasets.Version("2.0.0"),
47
+ description="AncoraCaNer dataset"
48
  ),
49
  ]
50
 
 
79
  ]
80
  )
81
  ),
 
 
 
 
 
 
82
  }
83
  ),
84
  supervised_keys=None,
 
94
  "test": f"{_URL}{_TEST_FILE}",
95
  }
96
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
 
97
 
98
  return [
99
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
 
102
  ]
103
 
104
  def _generate_examples(self, filepath):
105
+ logger.info("⏳ Generating examples from = %s", filepath)
106
+ with open(filepath, encoding="utf-8") as f:
107
+ guid = 0
108
+ tokens = []
109
+ ner_tags = []
110
+ for line in f:
111
+ if line.startswith("-DOCSTART-") or line == "" or line == "\n" or line.startswith("#"):
112
+ if tokens:
113
+ yield guid, {
114
+ "id": str(guid),
115
+ "tokens": tokens,
116
+ "ner_tags": ner_tags,
117
+ }
118
+ guid += 1
119
+ tokens = []
120
+ ner_tags = []
121
  else:
122
+ # AncoraCaNer tokens are space separated
123
+ splits = line.split('\t')
124
+ tokens.append(splits[9])
125
+ ner_tags.append(splits[3].rstrip())
126
+ # last example
127
+ yield guid, {
128
+ "id": str(guid),
129
+ "tokens": tokens,
130
+ "ner_tags": ner_tags,
131
+ }