Kriyans committed
Commit 06e4a0f
1 Parent(s): 17fdc5b

Update indian_names.py

Files changed (1)
  1. indian_names.py +41 -32
indian_names.py CHANGED
@@ -8,10 +8,10 @@ _URL = "https://github.com/Kriyansparsana/demorepo/blob/f4501f1de2c759ee215952b2
 
 
 class indian_namesConfig(datasets.BuilderConfig):
-    """BuilderConfig for Conll2003"""
+    """The WNUT 17 Emerging Entities Dataset."""
 
     def __init__(self, **kwargs):
-        """BuilderConfig forConll2003.
+        """BuilderConfig for WNUT 17.
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
@@ -19,10 +19,12 @@ class indian_namesConfig(datasets.BuilderConfig):
 
 
 class indian_names(datasets.GeneratorBasedBuilder):
-    """Conll2003 dataset."""
+    """The WNUT 17 Emerging Entities Dataset."""
 
     BUILDER_CONFIGS = [
-        indian_namesConfig(name="indian_names", version=datasets.Version("1.0.0"), description="indian_names dataset"),
+        indian_namesConfig(
+            name="indian_names", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
+        ),
     ]
 
     def _info(self):
@@ -35,8 +37,7 @@ class indian_names(datasets.GeneratorBasedBuilder):
                 datasets.features.ClassLabel(
                     names=[
                         "B-PER",
-                        "B-ORG",
-
+                        "B-ORG"
                     ]
                 )
             ),
@@ -48,7 +49,7 @@ class indian_names(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         urls_to_download = {
-            "train": f"{_URL}"
+            "train": f"{_URL}",
         }
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
@@ -56,32 +57,40 @@ class indian_names(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]
 
-
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        ner_tags = []
+            current_tokens = []
+            current_labels = []
+            sentence_counter = 0
+            for row in f:
+                row = row.rstrip()
+                if row:
+                    token, label = row.split("\t")
+                    current_tokens.append(token)
+                    current_labels.append(label)
                 else:
-                    # conll2003 tokens are space separated
-                    splits = line.split(" ")
-                    tokens.append(splits[0])
-                    ner_tags.append(splits[3].rstrip())
-            # last example
-            if tokens:
-                yield guid, {
-                    "id": str(guid),
-                    "tokens": tokens,
-                    "ner_tags": ner_tags,
-                }
+                    # New sentence
+                    if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
+                        continue
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                    sentence = (
+                        sentence_counter,
+                        {
+                            "id": str(sentence_counter),
+                            "tokens": current_tokens,
+                            "ner_tags": current_labels,
+                        },
+                    )
+                    sentence_counter += 1
+                    current_tokens = []
+                    current_labels = []
+                    yield sentence
+            # Don't forget last sentence in dataset 🧐
+            if current_tokens:
+                yield sentence_counter, {
+                    "id": str(sentence_counter),
+                    "tokens": current_tokens,
+                    "ner_tags": current_labels,
+                }
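
After the rewrite, _generate_examples no longer parses CoNLL-2003-style space-separated columns; it expects a two-column, tab-separated file with one token/tag pair per line and a blank line closing each sentence. A hypothetical input fragment (illustrative only, not taken from the repo), with the two columns separated by a single tab:

    Ramesh	B-PER
    Kumar	B-PER

    Infosys	B-ORG

This would yield two examples: (0, {"id": "0", "tokens": ["Ramesh", "Kumar"], "ner_tags": ["B-PER", "B-PER"]}) and (1, {"id": "1", "tokens": ["Infosys"], "ner_tags": ["B-ORG"]}). Note that row.split("\t") unpacks into exactly two names, so any non-empty line with more or fewer than one tab raises a ValueError.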
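A minimal loading sketch, assuming the script is saved locally as indian_names.py; the config name and split follow the BUILDER_CONFIGS and _split_generators above, and depending on the datasets version, trust_remote_code=True may be required for script-based datasets. One caveat: _URL points at a github.com/.../blob/... page, which serves HTML, so download_and_extract generally needs the raw.githubusercontent.com form of the URL to fetch the actual data file.

    from datasets import load_dataset

    # Load the builder script directly from the local file; "train" is the only
    # split defined in _split_generators above.
    ds = load_dataset("indian_names.py", split="train")

    # ner_tags are encoded against the ClassLabel names declared in _info,
    # i.e. ["B-PER", "B-ORG"].
    print(ds.features["ner_tags"])
    print(ds[0]["tokens"], ds[0]["ner_tags"])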