BrunoHays committed
Commit 6c3c591
1 Parent(s): d1570d0

Update multilingual-TEDX-fr.py

Files changed (1)
  1. multilingual-TEDX-fr.py +4 -4
multilingual-TEDX-fr.py CHANGED
@@ -89,7 +89,7 @@ class TEDX(datasets.GeneratorBasedBuilder):
     def _split_by_audio_file(self, segments_path: str, sentences_path: str, split_name: str) -> Tuple[List[str], List[List[Utterance]]]:
         speaker_paths = []
         seen_ids = set()
-        segments = []
+        segments_by_speaker = []
         with open(segments_path, "r") as segments, open(sentences_path) as sentences:
             segments_reader = csv.DictReader(segments, delimiter=' ', fieldnames=["segment_id", "speaker_id", "start_timestamp", "end_timestamp"])
             sentences_list = sentences.readlines()
@@ -97,14 +97,14 @@ class TEDX(datasets.GeneratorBasedBuilder):
             if segment["speaker_id"] not in seen_ids:
                 seen_ids.add(segment["speaker_id"])
                 speaker_paths.append(Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
-                segments.append([])
-            segments[-1].append(Utterance(speaker_id=segment["speaker_id"],
+                segments_by_speaker.append([])
+            segments_by_speaker[-1].append(Utterance(speaker_id=segment["speaker_id"],
                                           index=int(segment["segment_id"].split("_")[1]),
                                           sentence=sentence,
                                           start_timestamp=segment["segment_start_timestamp"],
                                           end_timestamp=segment["segment_end_timestamp"]
                                           ))
-        return speaker_paths, segments
+        return speaker_paths, segments_by_speaker


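The rename is the whole visible change: in the pre-commit code the accumulator list and the file handle opened by "with open(segments_path, "r") as segments" shared the name segments, so once the with statement rebound that name, the later segments.append([]) and segments[-1].append(...) hit the file object instead of the list. Below is a minimal, self-contained sketch of that collision, not code from the repository; io.StringIO stands in for the real segments file and the sample line is made up.

    from io import StringIO

    segments = []  # accumulator list, as in the pre-commit code
    with StringIO("spk1_0001 spk1 0.0 1.0\n") as segments:  # rebinds `segments` to the file-like handle
        try:
            segments.append([])  # what the old code attempted inside the with block
        except AttributeError as exc:
            print(exc)  # the handle has no .append, so this raises

    # Renaming the list to segments_by_speaker, as this commit does, keeps the
    # accumulator and the file handle as two distinct names.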