# Content of diph1.txt (Romanian source sentences, one numbered sentence per line):
# 001. Nu este treaba lor ce constituţie avem.
# 002. Ea era tot timpul pe minge.
# ...
# 499. Ea e singurul meu sprijin, fără ea eram ţărână.
# 500. A, de fapt ar fi ceva.

# The zero-padded prefix (e.g. 001) is the audio_index.


# Example paths for each collection:
# TEXT_FOLDER + '/' + COLLECTIONS[0] + '.txt'  (audio_index and the sentence are taken from this file)
# ACCENT_FOLDER + '/' + COLLECTIONS[0] + '/adr_' + COLLECTIONS[0] + '_' + audio_index
# ...
# AUDIO_FOLDER
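#
# Concrete example (paths as constructed in main() below, for COLLECTIONS[0] = 'news'
# and audio_index = '001'):
#   text/news.txt
#   accent/news/adr_news_001
#   phoneme/news/adr_news_001.phs
#   hts_labels/news/adr_news_001.lab
#   audio/news/adr_news_001.wav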

# Let's build a metadata.csv with columns:
# file_name,duration,start_time,end_time,text,accent,phonemes,hts_labels,hts_label_path
# audio_001,23.35,0.0,23.35,nu jeste..., ,n u j e...,1500000 2200000 n | ...,audio_001.lab
# (The script below currently writes the subset file_name,text,accent,phonemes,hts_label_path;
#  a sketch for deriving duration/start/end from the .phs timings follows read_phoneme_file.)


import os
import csv

# Constants
ACCENT_FOLDER = 'accent'
AUDIO_FOLDER = 'audio'
SYN_AUDIO_FOLDER = 'synthesized_audio'
HTS_LABELS_FOLDER = 'hts_labels'
PHONEMES_FOLDER = 'phoneme'
TEXT_FOLDER = 'text'

COLLECTIONS = ['news', 'novel', 'SUS']

OUTPUT_CSV = 'metadata.csv'

def read_text_file(text_path):
    """Returns a dict: audio_index -> sentence"""
    mapping = {}
    with open(text_path, 'r', encoding='utf-8') as f:
        for line in f:
            if '. ' in line:
                idx, sentence = line.strip().split('. ', 1)
                idx = idx.zfill(3)
                mapping[idx] = sentence.strip()
    return mapping
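
# Example usage (assumption: text/news.txt uses the same "NNN. sentence" layout
# shown for diph1.txt at the top of this file):
#   text_map = read_text_file('text/news.txt')
#   text_map['001']   # -> first sentence of the 'news' collection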

def read_phoneme_file(filepath):
    """Parse a .phs file with "start end label" lines (timestamps assumed to be
    in microseconds, as implied by the /1e6 conversion below) and return a list
    of (start_sec, end_sec, label) tuples, skipping "#" pause markers."""
    phonemes = []
    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) == 3:
                start, end, label = parts
                start_us = int(start)
                end_us = int(end)
                start_sec = start_us / 1e6
                end_sec = end_us / 1e6
                if label != "#":
                    phonemes.append((start_sec, end_sec, label))
    return phonemes
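
# Sketch (not part of the original pipeline): the duration/start_time/end_time
# columns from the schema comment above could be derived from the parsed .phs
# timings, assuming the utterance spans from the first phoneme's start to the
# last phoneme's end.
def phoneme_span(phonemes):
    """Return (start_sec, end_sec, duration_sec) for a list of (start, end, label) tuples."""
    if not phonemes:
        return 0.0, 0.0, 0.0
    start_sec = phonemes[0][0]
    end_sec = phonemes[-1][1]
    return start_sec, end_sec, end_sec - start_sec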

def main():
    rows = []

    for collection in COLLECTIONS:
        print(f"Processing collection: {collection}")
        text_map = read_text_file(f"{TEXT_FOLDER}/{collection}.txt")

        for audio_index, text in text_map.items():
            file_name = f"{collection}_{audio_index}"

            # Paths
            phoneme_path = os.path.join(PHONEMES_FOLDER, collection, f"adr_{collection}_{audio_index}.phs")
            accent_path = os.path.join(ACCENT_FOLDER, collection, f"adr_{collection}_{audio_index}")
            hts_label_path = os.path.join(HTS_LABELS_FOLDER, collection, f"adr_{file_name}.lab")  # path stored in the CSV for reference; the .lab file is not read here

            accent = ""
            if os.path.exists(accent_path):
                with open(accent_path, 'r', encoding='utf-8') as f:
                    accent = f.read().strip()

            rows.append([
                os.path.join(AUDIO_FOLDER, collection, f"adr_{file_name}.wav"),
                text,
                accent,
                phoneme_path,    # path to the .phs file (parse with read_phoneme_file if needed)
                hts_label_path
            ])

    # Write metadata CSV
    with open(OUTPUT_CSV, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_ALL)
        writer.writerow(["file_name", "text", "accent", "phonemes", "hts_label_path"])
        for row in rows:
            writer.writerow(row)

    print(f"\n✅ {OUTPUT_CSV} written with {len(rows)} entries.")

if __name__ == "__main__":
    main()
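
# Usage note (assumption): run this script from the corpus root directory that
# contains the accent/, audio/, phoneme/, hts_labels/ and text/ folders; it
# writes metadata.csv to the current working directory.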