import subprocess
from os import path
import os
import torch
import sys
from tqdm import tqdm

if __name__ == "__main__":
    # Prepend the bundled ffmpeg binaries to PATH so the subprocess calls
    # below can find them without a system-wide ffmpeg install.
    current_path = os.path.dirname(os.path.abspath(__file__))
    # FIX: the original hard-coded ";" (the Windows PATH separator), which
    # silently corrupts PATH on POSIX systems; os.pathsep is correct on all
    # platforms and identical to ";" on Windows.
    os.environ["PATH"] = os.path.join(current_path, "ffmpeg/bin/") + os.pathsep + os.environ["PATH"]
import torchaudio
from pydub import AudioSegment
import shutil
import text
import whisper

# Absolute directory of this script; used to locate the bundled
# demucs/whisper checkpoints and the filelists output directory.
current_path = os.path.dirname(os.path.abspath(__file__))


def get_wav(input_files):
    """Convert each input audio/video file to WAV with ffmpeg.

    Converted files are written into an ``extracted_wav`` directory created
    next to the first input file.

    Args:
        input_files: Non-empty list of media file paths (all in one directory).

    Returns:
        List of paths to the generated ``.wav`` files.
    """
    save_dir = path.join(path.dirname(input_files[0]), 'extracted_wav')
    wav_files = []
    os.makedirs(save_dir, exist_ok=True)
    for file in input_files:
        # FIX: use splitext instead of split('.')[0] so filenames containing
        # extra dots (e.g. "ep.01.mp4") keep their full stem.
        stem = path.splitext(path.basename(file))[0]
        output_file = path.join(save_dir, stem + '.wav')
        # FIX: pass an argument list with shell=False so paths containing
        # quotes, spaces, or shell metacharacters cannot break the command
        # line or inject shell commands.
        subprocess.call(
            ['ffmpeg', '-loglevel', 'warning', '-y', '-i', file,
             '-strict', '-2', output_file])
        wav_files.append(output_file)
    return wav_files


def denoise_and_cut(wav_files):
    """Isolate vocals, resample to 22050 Hz mono, and cut into short clips.

    Pipeline (in place on ``wav_files``, then into a sibling ``cut_audio``
    directory):
      1. Run demucs to extract the vocal stem and overwrite each input wav.
      2. Downmix to mono and resample to 22050 Hz.
      3. Slice each wav: clips shorter than 1.5 s are dropped, clips up to
         10 s are kept whole, longer audio is split into 5 s chunks.
    The directory containing ``wav_files`` is deleted when done.

    Args:
        wav_files: Non-empty list of ``.wav`` paths, all in one directory.

    Returns:
        List of paths to the numbered clip files under ``cut_audio``.
    """
    model_name = '955717e8'
    for file in wav_files:
        assert file.endswith('.wav')
        output_dir = path.dirname(file)
        # FIX: use a subprocess argument list instead of an os.system()
        # f-string — the original left --repo and --out unquoted, so any
        # path containing spaces broke the command.
        subprocess.call([
            sys.executable, '-m', 'demucs', '--two-stems=vocals',
            '--repo', path.join(current_path, 'demucs_checkpoints'),
            '-n', model_name, '--out', output_dir, file])
        # FIX: splitext keeps the full stem for filenames with extra dots.
        stem = path.splitext(path.basename(file))[0]
        stem_path = path.join(output_dir, model_name, stem, 'vocals.wav')
        # Replace the original wav with its extracted vocal stem.
        shutil.move(stem_path, file)
        shutil.rmtree(path.join(output_dir, model_name))
    for file in wav_files:
        wav, sr = torchaudio.load(file, frame_offset=0, num_frames=-1,
                                  normalize=True, channels_first=True)
        # Merge all channels into one (mono), keeping a channel dimension.
        wav = wav.mean(dim=0).unsqueeze(0)
        if sr != 22050:
            wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=22050)(wav)
        torchaudio.save(file, wav, 22050, channels_first=True)

    # Cut the audio into training-sized clips.
    cut_audio_files = []
    cnt = 1
    wav_files_dir = path.dirname(wav_files[0])
    cut_audio_dir = path.join(path.dirname(wav_files_dir), 'cut_audio')
    if path.exists(cut_audio_dir):
        # Start from a clean output directory.
        shutil.rmtree(cut_audio_dir)
    os.makedirs(cut_audio_dir)
    for file in tqdm(wav_files, desc='cut audio: '):
        wav_file = AudioSegment.from_wav(file)
        if wav_file.duration_seconds < 1.5:
            continue  # too short to be useful
        if wav_file.duration_seconds <= 10:
            output_file_name = path.join(cut_audio_dir, f'{cnt}.wav')
            wav_file.export(output_file_name, format='wav')
            cnt += 1
            cut_audio_files.append(output_file_name)
            continue
        # Split longer audio into 5-second chunks, dropping short remainders.
        chunk_length_ms = 5000
        for chunk in wav_file[::chunk_length_ms]:
            if chunk.duration_seconds < 1.5:
                continue
            output_file_name = path.join(cut_audio_dir, f'{cnt}.wav')
            chunk.export(output_file_name, format='wav')
            cnt += 1
            cut_audio_files.append(output_file_name)
    # Done cutting; remove the intermediate wav directory.
    shutil.rmtree(wav_files_dir)
    return cut_audio_files


def data_annotation(cut_wavs, file_list_name, model_name, language):
    """Transcribe each clip with Whisper and write a pipe-delimited filelist.

    Each output line has the form ``<wav path>|0|<transcript>``.

    Args:
        cut_wavs: Non-empty list of wav clip paths (all in one directory).
        file_list_name: Base name (without extension) of the output txt file.
        model_name: Whisper model size, e.g. 'base', 'medium', 'large'.
        language: Language code forwarded to Whisper, e.g. 'zh'.

    Returns:
        Path to the written filelist txt.
    """
    model = whisper.load_model(
        model_name, download_root=path.join(current_path, 'whisper_checkpoints'))
    entries = []
    for wav_file in tqdm(cut_wavs, desc='Audio annotation: '):
        transcription = model.transcribe(wav_file, language=language)
        entries.append([wav_file, '0', transcription["text"]])
    txt_file = path.join(path.dirname(cut_wavs[0]), file_list_name + '.txt')
    with open(txt_file, 'w', encoding='utf-8') as f:
        f.writelines('|'.join(entry) + '\n' for entry in entries)
    # Release GPU memory held by the model before returning.
    model.cpu()
    del model
    torch.cuda.empty_cache()
    return txt_file


def text_clean(file_path, audio_language_tag):
    """Clean the transcripts in a filelist and write a ``.cleaned`` copy.

    Wraps each transcript in the language tag (e.g. ``[ZH]text[ZH]``), runs it
    through the ``cjke_cleaners2`` text cleaner, and writes the result to
    ``filelists/<basename>.cleaned`` next to this script.

    Args:
        file_path: Path to the pipe-delimited filelist produced by annotation.
        audio_language_tag: Language tag such as '[ZH]', '[JA]' or '[EN]'.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    out_dir = path.join(current_path, 'filelists')
    # FIX: the original assumed filelists/ already existed and crashed with
    # FileNotFoundError otherwise; create it on demand.
    os.makedirs(out_dir, exist_ok=True)
    with open(path.join(out_dir, f'{path.basename(file_path)}.cleaned'), 'w', encoding='utf-8') as f:
        for line in lines:
            fields = line.strip().split('|')
            fields[-1] = text._clean_text(
                f'{audio_language_tag}{fields[-1]}{audio_language_tag}', ['cjke_cleaners2'])
            f.write('|'.join(fields) + '\n')
    print('text_clean done! ')


def process_data(audio_file_or_dir, audio_language_tag, whisper_size):
    """Run the full pipeline: extract wav -> denoise/cut -> annotate -> clean.

    Args:
        audio_file_or_dir: A single media file, or a directory of media files.
        audio_language_tag: One of '[ZH]', '[JA]', '[EN]'.
        whisper_size: Whisper model size, e.g. 'base', 'medium', 'large'.
    """
    def _is_audio_or_video(filename):
        # True if the extension looks like a supported audio/video format.
        media_ext = {'.mp3', '.wav', '.ogg', '.flac', '.wma', '.aac',
                     '.mp4', '.avi', '.mkv', '.m4v', '.mov', '.wmv',
                     '.flv', '.webm'}
        return os.path.splitext(filename)[-1].lower() in media_ext

    whisper_language_map = {'[ZH]': 'zh', '[JA]': 'ja', '[EN]': 'en'}
    audio_file_or_dir = os.path.normpath(audio_file_or_dir)
    if path.isdir(audio_file_or_dir):
        wav_files = get_wav(
            [path.join(audio_file_or_dir, f) for f in os.listdir(audio_file_or_dir)
             if _is_audio_or_video(f)])
        file_list_name = path.split(audio_file_or_dir)[-1]
    else:
        wav_files = get_wav([audio_file_or_dir])
        # FIX: splitext keeps the full stem for filenames with extra dots
        # (split('.')[0] truncated "ep.01.wav" to "ep").
        file_list_name = path.splitext(path.basename(audio_file_or_dir))[0]
    cut_audio_files = denoise_and_cut(wav_files)
    file_list = data_annotation(cut_audio_files, file_list_name, whisper_size,
                                whisper_language_map[audio_language_tag])
    text_clean(file_list, audio_language_tag)


if __name__ == "__main__":
    # FIX: process_data() requires three positional arguments; the original
    # call passed only the path and would raise TypeError at runtime.
    # '[ZH]' / 'medium' are example values — adjust to the input material.
    process_data('E:\\测试\\2.wav', '[ZH]', 'medium')
    # process_data('E:\\测试\\', '[ZH]', 'medium')
