# 1. generate audio filename(token) <-> speech text file
#   speech text file can include several sentences for one token, separated by '/', e.g. go <-> 出发/走/冲冲冲
#   the several sentences will be modified manually
#   additional randomly generated column to determine how many times the sentences will be repeated, usually 2-3 times
# 2. use api or online service to generate audio file according to the connected speech text file
# 3. split and trim the generated audio file to get the audio clips for each sentence
# 4. map the audio clips to the original token
# 5. generate audio package by file structure

import pandas as pd
import random
from pathlib import Path
import shutil

# All generated artifacts (descriptor csv, TTS text files, audio package) land here.
output_path = Path('./output')

def generate_token_descriptor():
    """Build output/token_descriptor.csv: one row per non-MISC pacenote token.

    Columns: primary_filename, description, repeat_times.
    """
    df = pd.read_csv('pacenote_view.csv')
    df = df.loc[df['type'] != 'MISC', ['primary_filename', 'description']]
    # repeat_times = [random.randint(2, 3) for _ in range(len(df))]
    # Edge TTS does not randomize the voice, so one take per sentence is enough.
    df['repeat_times'] = [1] * len(df)
    df.to_csv(output_path / 'token_descriptor.csv', index=False)

# generate_token_descriptor()

def generate_str_for_tts():
    """Expand token_descriptor.csv into two parallel line-oriented files.

    output/tts_input.txt     -> one token per line (one line per sentence variant)
    output/tts_input_str.txt -> the matching sentence text, same line order
    """
    descriptor = pd.read_csv(output_path / 'token_descriptor.csv')
    tokens = []
    sentences = []
    for _, row in descriptor.iterrows():
        # multiple sentence variants for one token are '/'-separated
        variants = row['description'].split('/')
        for _ in range(row['repeat_times']):
            for sentence in variants:
                tokens.append(row['primary_filename'])
                sentences.append(sentence)

    with open(output_path / 'tts_input.txt', 'w') as out:
        out.writelines(f'{token}\n' for token in tokens)

    with open(output_path / 'tts_input_str.txt', 'w') as out:
        out.writelines(f'{sentence}\n' for sentence in sentences)

# generate_str_for_tts()

def slice_and_tag():
    """Slice output.mp3 into clips and verify they line up 1:1 with tokens."""
    with open(output_path / 'tts_input.txt') as f:
        raw_lines = f.readlines()

    # drop blank lines / surrounding whitespace from the token list
    token_list = [line.strip() for line in raw_lines if line.strip()]
    print(len(token_list))

    # split the concatenated TTS audio into per-sentence clips
    from audio_slicer import get_audio_segments
    _, segments = get_audio_segments(output_path / 'output.mp3')
    print(len(segments))
    assert len(token_list) == len(segments)

# slice_and_tag()

def tag():
    """Map sliced wav clips back to tokens and lay out the audio package.

    Reads output/tts_input.txt (one token per line, in the same order as the
    numbered wav clips in output/xiaoyi) and copies each clip into
    output/xiaoyi/pkg: a token with a single clip becomes <token>.wav, while
    a token with several clips gets a <token>/ sub-directory holding them all.
    """
    with open(output_path / 'tts_input.txt') as f:
        lines = f.readlines()

    # drop empty entries so the token count matches the clip count
    tokens = [t.strip() for t in lines if t.strip()]
    print(len(tokens))

    # clips are named xiaoyi_<n>.wav; sort numerically so clip order
    # matches the line order of tts_input.txt
    wav_dir = output_path / 'xiaoyi'
    wav_files = sorted(
        wav_dir.glob('*.wav'),
        key=lambda p: int(p.name.removeprefix('xiaoyi_').removesuffix('.wav')),
    )
    print(len(wav_files))
    assert len(tokens) == len(wav_files)

    # group clips by token, preserving clip order within each token
    token_dict = {}
    for token, clip in zip(tokens, wav_files):
        token_dict.setdefault(token, []).append(clip)

    audio_pkg_output_path = wav_dir / 'pkg'
    # parents=True makes this robust even if run standalone
    audio_pkg_output_path.mkdir(parents=True, exist_ok=True)
    for token, clips in token_dict.items():
        if len(clips) > 1:
            # several takes: place them in a per-token sub-directory
            sub_dir = audio_pkg_output_path / token
            sub_dir.mkdir(exist_ok=True)
            for clip in clips:
                shutil.copy(clip, sub_dir / clip.name)
        else:
            shutil.copy(clips[0], audio_pkg_output_path / f'{token}.wav')

# Guard the pipeline entry point so importing this module has no side effects.
if __name__ == '__main__':
    tag()

