Kriyans committed on
Commit 3ddb755
1 Parent(s): 165dad6

Update indian_names.py

Files changed (1)
  1. indian_names.py +30 -19
indian_names.py CHANGED
@@ -6,21 +6,28 @@ _URL = "https://github.com/Kriyansparsana/demorepo"
 _TRAINING_FILE = "indian-name-org.csv"
 
 class indian_namesConfig(datasets.BuilderConfig):
+    """The WNUT 17 Emerging Entities Dataset."""
+
     def __init__(self, **kwargs):
-        super(indian_namesConfig, self).__init__(**kwargs)
+        """BuilderConfig for WNUT 17.
+        Args:
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(WNUT_17Config, self).__init__(**kwargs)
+
+
+class indina_names(datasets.GeneratorBasedBuilder):
+    """The WNUT 17 Emerging Entities Dataset."""
 
-class indian_names(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         indian_namesConfig(
-            name="indian_names_dataset",
-            version=datasets.Version("1.0.0"),
-            description="Indian Names Dataset",
+            name="wnut_17", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
         ),
     ]
 
     def _info(self):
         return datasets.DatasetInfo(
-            description="Indian Names dataset",
+            description=_DESCRIPTION,
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
@@ -29,27 +36,28 @@ class indian_names(datasets.GeneratorBasedBuilder):
                         datasets.features.ClassLabel(
                             names=[
                                 "B-PER",
-                                "I-ORG",
+                                "B-ORG",
                             ]
                         )
                     ),
                 }
             ),
+            supervised_keys=None,
         )
 
-    def _split_generators(self, dl_manager):
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-        ]
-
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        urls_to_download = {
+            "train": f"{_URL}{_TRAINING_FILE}",
+        }
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+        ]
 
     def _generate_examples(self, filepath):
+        logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
             current_tokens = []
             current_labels = []
@@ -57,13 +65,15 @@ class indian_names(datasets.GeneratorBasedBuilder):
             for row in f:
                 row = row.rstrip()
                 if row:
-                    token, label = row.split(",")
+                    token, label = row.split("\t")
                     current_tokens.append(token)
                     current_labels.append(label)
                 else:
+                    # New sentence
                     if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
                         continue
-                    assert len(current_tokens) == len(current_labels), "Mismatch between tokens and labels"
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
                     sentence = (
                         sentence_counter,
                         {
@@ -76,6 +86,7 @@ class indian_names(datasets.GeneratorBasedBuilder):
                     current_tokens = []
                     current_labels = []
                     yield sentence
+            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),