Kriyans committed on
Commit
5a8ec1d
1 Parent(s): 879ee4f

Update indian_names.py

Browse files
Files changed (1) hide show
  1. indian_names.py +41 -44
indian_names.py CHANGED
@@ -1,9 +1,7 @@
1
  import datasets
2
 
3
-
4
  logger = datasets.logging.get_logger(__name__)
5
 
6
-
7
  _URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"
8
 
9
  class indian_namesConfig(datasets.BuilderConfig):
@@ -16,7 +14,6 @@ class indian_namesConfig(datasets.BuilderConfig):
16
  """
17
  super(indian_namesConfig, self).__init__(**kwargs)
18
 
19
-
20
  class indian_names(datasets.GeneratorBasedBuilder):
21
  """The WNUT 17 Emerging Entities Dataset."""
22
 
@@ -59,45 +56,45 @@ class indian_names(datasets.GeneratorBasedBuilder):
59
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
60
  ]
61
 
62
- def _generate_examples(self, filepath):
63
- logger.info("⏳ Generating examples from = %s", filepath)
64
- with open(filepath, encoding="utf-8") as f:
65
- current_tokens = []
66
- current_labels = []
67
- sentence_counter = 0
68
- for row in f:
69
- row = row.rstrip()
70
- if row:
71
- if "\t" in row:
72
- token, label = row.split("\t")
73
- current_tokens.append(token)
74
- current_labels.append(label)
 
 
 
 
75
  else:
76
- # Handle cases where the delimiter is missing
77
- # You can choose to skip these rows or handle them differently
78
- logger.warning(f"Delimiter missing in row: {row}")
79
- else:
80
- # New sentence
81
- if not current_tokens:
82
- # Consecutive empty lines will cause empty sentences
83
- continue
84
- assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
85
- sentence = (
86
- sentence_counter,
87
- {
88
- "id": str(sentence_counter),
89
- "tokens": current_tokens,
90
- "ner_tags": current_labels,
91
- },
92
- )
93
- sentence_counter += 1
94
- current_tokens = []
95
- current_labels = []
96
- yield sentence
97
- # Don't forget the last sentence in the dataset 🧐
98
- if current_tokens:
99
- yield sentence_counter, {
100
- "id": str(sentence_counter),
101
- "tokens": current_tokens,
102
- "ner_tags": current_labels,
103
- }
 
1
  import datasets
2
 
 
3
  logger = datasets.logging.get_logger(__name__)
4
 
 
5
  _URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"
6
 
7
  class indian_namesConfig(datasets.BuilderConfig):
 
14
  """
15
  super(indian_namesConfig, self).__init__(**kwargs)
16
 
 
17
  class indian_names(datasets.GeneratorBasedBuilder):
18
  """The WNUT 17 Emerging Entities Dataset."""
19
 
 
56
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
57
  ]
58
 
59
+ def _generate_examples(self, filepath):
60
+ logger.info("⏳ Generating examples from = %s", filepath)
61
+ with open(filepath, encoding="utf-8") as f:
62
+ current_tokens = []
63
+ current_labels = []
64
+ sentence_counter = 0
65
+ for row in f:
66
+ row = row.rstrip()
67
+ if row:
68
+ if "\t" in row:
69
+ token, label = row.split("\t")
70
+ current_tokens.append(token)
71
+ current_labels.append(label)
72
+ else:
73
+ # Handle cases where the delimiter is missing
74
+ # You can choose to skip these rows or handle them differently
75
+ logger.warning(f"Delimiter missing in row: {row}")
76
  else:
77
+ # New sentence
78
+ if not current_tokens:
79
+ # Consecutive empty lines will cause empty sentences
80
+ continue
81
+ assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
82
+ sentence = (
83
+ sentence_counter,
84
+ {
85
+ "id": str(sentence_counter),
86
+ "tokens": current_tokens,
87
+ "ner_tags": current_labels,
88
+ },
89
+ )
90
+ sentence_counter += 1
91
+ current_tokens = []
92
+ current_labels = []
93
+ yield sentence
94
+ # Don't forget the last sentence in the dataset 🧐
95
+ if current_tokens:
96
+ yield sentence_counter, {
97
+ "id": str(sentence_counter),
98
+ "tokens": current_tokens,
99
+ "ner_tags": current_labels,
100
+ }