import argparse
import glob
import os
from tqdm import tqdm

import json
import ffmpeg
import random
import shutil

from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# Speaker label stamped into the utt2spk/spk2utt manifests for every utterance.
speaker = "hpp"

# final_08:42.497.wav, data/train/PS1H5561_08:38.402.wav need manual adjustment
# split mp3 to wav, and store in raw folder
def split_audio():
    """Cut segments from the source recordings into <des_dir>/raw.

    Reads hpp_voice_data.json, which maps each source file name to a list
    of [start, duration] pairs, and exports each segment as a 16 kHz mono
    wav via ffmpeg. Skips entirely if the raw folder already exists.
    """
    raw_dir = os.path.join(args.des_dir, "raw")
    if os.path.exists(os.path.join(args.des_dir, "raw")):
        print("raw folder exist, skip split audio data")
        return
    os.makedirs(raw_dir)

    with open('hpp_voice_data.json', 'r') as f:
        cue_sheet = json.load(f)

    for src_name in cue_sheet:
        src_path = os.path.join(args.src_dir, src_name)
        # Silently skip cue-sheet entries whose source audio is absent.
        if not os.path.exists(src_path):
            continue
        for times in tqdm(cue_sheet[src_name]):
            # times[0] = start offset, times[1] = duration (ffmpeg ss/t).
            clip = ffmpeg.input(src_path, ss=times[0], t=times[1])
            out_name = os.path.basename(src_name).split(".")[0] + "_" + times[0] + ".wav"
            out_path = os.path.join(args.des_dir, "raw", out_name)
            ffmpeg.output(clip, out_path, ar="16000",
                          ac=1, format="wav").run(quiet=True)

def copy_wavs_to(wavs, dest):
    """Copy the given wav files from <des_dir>/raw into <des_dir>/<dest>.

    Skips entirely (with a notice) if the destination folder already exists.
    """
    target_dir = os.path.join(args.des_dir, dest)
    if os.path.exists(target_dir):
        print("{} folder exist, skip copy wav data".format(dest))
        return
    os.makedirs(target_dir)
    raw_dir = os.path.join(args.des_dir, "raw")
    for wav_path in wavs:
        name = os.path.basename(wav_path)
        shutil.copy(os.path.join(raw_dir, name), os.path.join(target_dir, name))

def split_dataset(train_ratio=0.9, test_ratio=0.1):
    """Randomly partition <des_dir>/raw wavs into train and test folders.

    Args:
        train_ratio: fraction of files copied to the train split.
        test_ratio: kept for backward compatibility but ignored — the
            test split always receives every file not placed in train,
            so no wav is ever dropped by integer truncation.
    """
    wavs = list(glob.glob('{}/*wav'.format(os.path.join(args.des_dir, "raw"))))
    random.shuffle(wavs)

    train_size = int(len(wavs) * train_ratio)

    copy_wavs_to(wavs[:train_size], "train")
    # Remainder (not a ratio-based slice) so the two splits cover all files.
    copy_wavs_to(wavs[train_size:], "test")

def stt_data():
    """Transcribe every wav in the train/test splits with SenseVoice.

    Writes one '<wav path> <transcription>' line per file to wav2txt.txt
    inside each split folder; skips a split whose wav2txt.txt exists.
    """
    model = AutoModel(
        model="iic/SenseVoiceSmall",
        vad_model="fsmn-vad",
        vad_kwargs={"max_single_segment_time": 30000},
        device="cuda:0",
        disable_update=True
    )

    for split in ["train", "test"]:
        out_file = '{}/{}/wav2txt.txt'.format(args.des_dir, split)
        if os.path.exists(out_file):
            print("wav2txt for {} exist, skip generate".format(split))
            continue

        transcripts = {}
        for wav_path in tqdm(glob.glob('{}/{}/*wav'.format(args.des_dir, split))):
            result = model.generate(
                input=wav_path,
                cache={},
                language="zn",  # "zn", "en", "yue", "ja", "ko", "nospeech"
                use_itn=True,
                batch_size_s=60,
                merge_vad=True,
                merge_length_s=15,
            )
            # Strip SenseVoice's rich tags down to plain text.
            transcripts[wav_path] = rich_transcription_postprocess(result[0]["text"])

        with open(out_file, 'w') as f:
            f.writelines('{} {}\n'.format(k, v) for k, v in transcripts.items())

def volume_increase():
    """Placeholder for a volume-normalization step; not implemented yet.

    Returns:
        None. Kept as an explicit no-op stub so the intended pipeline
        step stays visible (the old body was a bare ``None`` expression,
        which reads like a mistake rather than a deliberate stub).
    """
    return None

def prepare_data():
    """Write Kaldi-style manifests (wav.scp, text, utt2spk, spk2utt) into
    <des_dir>/train and <des_dir>/test from each split's wav2txt.txt.

    Each manifest line is '<utt-id> <value>'; utt-id is the wav basename
    without its .wav suffix. Wavs whose path no longer exists are skipped.
    """
    for t in ["train", "test"]:
        # Reset the manifests for every split — accumulating them across
        # splits would leak train utterances into the test manifests.
        utt2wav, utt2text, utt2spk, spk2utt = {}, {}, {}, {}
        wav2txtFileName = '{}/{}/wav2txt.txt'.format(args.des_dir, t)

        with open(wav2txtFileName, "r") as f:
            # Split on the FIRST space only: the transcription itself can
            # contain spaces (e.g. after ITN post-processing), which would
            # break a plain 2-tuple unpack.
            lines = [l.split(' ', 1) for l in f]
        for fields in tqdm(lines):
            if len(fields) != 2:
                continue  # malformed line (no transcription after the path)
            wav, content = fields[0].strip(), fields[1].strip()
            if not os.path.exists(wav):
                continue
            utt = os.path.basename(wav).replace('.wav', '')
            utt2wav[utt] = wav
            utt2text[utt] = content
            utt2spk[utt] = speaker
            spk2utt.setdefault(speaker, []).append(utt)

        with open('{}/{}/wav.scp'.format(args.des_dir, t), 'w') as f:
            for k, v in utt2wav.items():
                f.write('{} {}\n'.format(k, v))
        with open('{}/{}/text'.format(args.des_dir, t), 'w') as f:
            for k, v in utt2text.items():
                f.write('{} {}\n'.format(k, v))
        with open('{}/{}/utt2spk'.format(args.des_dir, t), 'w') as f:
            for k, v in utt2spk.items():
                f.write('{} {}\n'.format(k, v))
        with open('{}/{}/spk2utt'.format(args.des_dir, t), 'w') as f:
            for k, v in spk2utt.items():
                f.write('{} {}\n'.format(k, ' '.join(v)))


if __name__ == "__main__":
    # CLI: --src_dir holds the source recordings, --des_dir receives the
    # processed dataset (raw/, train/, test/ plus manifests).
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_dir', type=str)
    parser.add_argument('--des_dir', type=str)
    args = parser.parse_args()

    # Pipeline: cut segments -> split train/test -> transcribe -> manifests.
    for step in (split_audio, split_dataset, stt_data, prepare_data):
        step()
