# Generate an audio package with CosyVoice automatically.
# Requires the CosyVoice source code, set up and configured with the CosyVoice2-0.5B model.

# Example:
# python autogen_audio_pkg.py --csv ./pacenote_view.csv --cosy /mnt/k/lab/CosyVoice/ --prompt_audio /mnt/c/Users/Admin/Documents/My\ Games/ZTMZClub_nextgen/codrivers/拉稀车手老王/system_start_stage/ganfanyaoshen.mp3 --prompt_text '干翻瑶神，就在这一把' --out /mnt/k/data/ai_laowang --strong

# python autogen_audio_pkg.py --csv ./pacenote_view.csv --cosy /mnt/k/lab/CosyVoice/ --prompt_audio /mnt/c/Users/Admin/Documents/My\ Games/ZTMZClub_nextgen/codrivers/kiko/system_start_stage/system_start_stage.wav --prompt_text '抓好安全带系好方向盘，我们开始噜' --out /mnt/k/data/ai_kiko3 --strong --instruct 粤语 --use-instruct

import librosa
import numpy as np
import pandas as pd
from pathlib import Path
import sys
from argparse import ArgumentParser
import torch
import torchaudio
from tqdm import tqdm
from loguru import logger
from uuid import uuid4
import torchaudio.functional as F
# Peak-amplitude ceiling used by postprocess() when scaling the prompt audio.
max_val = 0.8

def parse_args():
    """Collect the command-line options and return them as a plain dict."""
    cli = ArgumentParser()
    cli.add_argument('--csv', type=str, required=True, help='Path to pacenote_view.csv')
    cli.add_argument('--cosy', type=str, required=True, help='Path to the cosyvoice2 folder')
    cli.add_argument('--prompt_audio', type=str, required=True, help='Path to the prompt audio file')
    cli.add_argument('--prompt_text', type=str, required=True, help="the text of the prompt audio")
    cli.add_argument('--out', type=str, required=True, help='Path to the output folder')
    cli.add_argument('--seed', type=int, default=42, help='Random seed')
    cli.add_argument('--instruct', type=str, required=False, help='Instruction for the audio', default="激情热烈紧张")
    # Emphasis flag: downstream this wraps the text in <strong> tags.
    cli.add_argument('--strong', action='store_true', help='Use strong prompt')
    cli.add_argument('--speed', type=float, default=1.0, help='Speed of the generated audio')
    cli.add_argument('--use-instruct', action='store_true', help='Use instruct')
    return vars(cli.parse_args())

def read_pacenote_csv(csv_path: Path) -> dict:
    """Read pacenote_view.csv and map each primary_filename to its speech-text variants.

    Each row's `speech_texts` column holds '/'-separated alternatives; they are
    split into a list. If a primary_filename appears on multiple rows, the last
    row wins (same as the original iterrows-based implementation).

    Args:
        csv_path: Path (or anything pandas.read_csv accepts) to the CSV file.

    Returns:
        dict mapping primary_filename -> list of speech-text strings.
    """
    df = pd.read_csv(csv_path)
    # Column-wise zip avoids the per-row overhead of DataFrame.iterrows()
    # and drops the unused row index.
    return {
        filename: speech.split('/')
        for filename, speech in zip(df['primary_filename'], df['speech_texts'])
    }

# Disabled helper: RMS-based loudness normalization to a target dB level.
# Kept for reference; trim_silence_vad no longer calls it.
# def normalize_audio(audio_tensor, sample_rate, target_db=-10):
#     # Calculate the current loudness
#     rms = torch.sqrt(torch.mean(audio_tensor[0]**2))
#     current_db = 20 * torch.log10(rms)
#
#     # Calculate the necessary gain adjustment
#     gain = target_db - current_db
#     audio_normalized = audio_tensor * (10**(gain / 20))
#     return audio_normalized

def trim_silence_vad(audio_tensor, sample_rate, vad_options=None):
    """Strip leading/trailing silence from a generated speech tensor.

    Uses librosa's energy-based trim (top_db=60, ~20 ms frames at 22.05 kHz).
    `sample_rate` and `vad_options` are accepted for interface compatibility
    but are not used by the current implementation.
    NOTE(review): assumes audio_tensor is shaped (1, n_samples) — the length
    logging indexes channel 0.
    """
    trimmed, _ = librosa.effects.trim(
        audio_tensor, top_db=60, frame_length=440, hop_length=220
    )
    logger.info(f'len trimed before: {len(audio_tensor[0])}, after: {len(trimmed[0])}')
    return trimmed

def postprocess(cosyvoice, speech, top_db=60, hop_length=220, win_length=440):
    """Prepare a prompt waveform: trim silence, cap the peak, pad the tail.

    The peak amplitude is limited to the module-level max_val, and 0.2 s of
    silence is appended so synthesis does not clip the final phoneme.
    NOTE(review): assumes `speech` is a (1, n_samples) torch tensor.
    """
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    peak = speech.abs().max()
    if peak > max_val:
        # Same operation order as before (divide then multiply) to keep
        # bit-identical float results.
        speech = speech / peak * max_val
    tail_pad = torch.zeros(1, int(cosyvoice.sample_rate * 0.2))
    return torch.concat([speech, tail_pad], dim=1)

def output_audio(audio_pkg_folder: Path, filename: str, text: str, cosyvoice2, prompt_audio, use_instruct, prompt_text, instruct, is_strong=True, speed=1.0, index=0, seed=0):
    """Synthesize one pacenote text and save every generated chunk as .ogg.

    Args:
        audio_pkg_folder: root of the output audio package.
        filename: subfolder name (one folder per pacenote).
        text: the text to synthesize.
        cosyvoice2: loaded CosyVoice2 model instance.
        prompt_audio: 16 kHz prompt waveform (already postprocessed).
        use_instruct: if True use inference_instruct2, else inference_zero_shot.
        prompt_text: transcript of the prompt audio (zero-shot path only).
        instruct: instruction text (instruct path only; also embedded in filenames).
        is_strong: wrap the text in <strong> emphasis tags.
        speed: playback speed multiplier passed to the model.
        index: variant index within this pacenote (embedded in filenames).
        seed: random seed (embedded in filenames for traceability).
    """
    output_path = audio_pkg_folder / filename
    output_path.mkdir(exist_ok=True, parents=True)
    # CosyVoice interprets <strong> tags as emphasis markers.
    speech_text = f'<strong>{text}</strong>' if is_strong else text

    if use_instruct:
        for i, j in enumerate(cosyvoice2.inference_instruct2(speech_text, prompt_speech_16k=prompt_audio, speed=speed, instruct_text=instruct, stream=False)):
            # trim silence at head and tail
            trimmed_speech = trim_silence_vad(j['tts_speech'], cosyvoice2.sample_rate)
            torchaudio.save(str(output_path / f'{uuid4()}_{instruct}_{i}_{index}_{seed}.ogg'), trimmed_speech, cosyvoice2.sample_rate)
    else:
        # Fix: `speed` was previously only honored on the instruct path; the
        # zero-shot call silently ignored --speed. inference_zero_shot accepts
        # a speed keyword in CosyVoice2, so pass it through here as well.
        for i, j in enumerate(cosyvoice2.inference_zero_shot(speech_text, prompt_speech_16k=prompt_audio, prompt_text=prompt_text, speed=speed, stream=False)):
            trimmed_speech = trim_silence_vad(j['tts_speech'], cosyvoice2.sample_rate)
            torchaudio.save(str(output_path / f'{uuid4()}_{i}_{index}_{seed}.ogg'), trimmed_speech, cosyvoice2.sample_rate)

    


def main():
    """Drive the pipeline: parse args, load CosyVoice2, synthesize every pacenote."""
    opts = parse_args()
    cosy_path = Path(opts['cosy'])
    # CosyVoice is imported from its source checkout, so the repo root and the
    # vendored Matcha-TTS must be on sys.path before the deferred imports below.
    logger.info(f'appending path: {cosy_path}')
    sys.path.append(str(cosy_path))
    logger.info(f"appending path {cosy_path / 'third_party/Matcha-TTS'}")
    sys.path.append(str(cosy_path / 'third_party/Matcha-TTS'))
    from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
    from cosyvoice.utils.file_utils import load_wav
    from cosyvoice.utils.common import set_all_random_seed

    set_all_random_seed(opts['seed'])
    pacenote = read_pacenote_csv(opts['csv'])
    out_dir = Path(opts['out'])
    out_dir.mkdir(exist_ok=True, parents=True)
    tts = CosyVoice2(str(cosy_path / 'pretrained_models/CosyVoice2-0.5B'), load_jit=False, load_trt=False, fp16=False)
    # The prompt is loaded at 16 kHz, then trimmed/normalized for conditioning.
    prompt_audio = postprocess(tts, load_wav(opts['prompt_audio'], 16000))
    for pacenote_file, variants in tqdm(pacenote.items(), total=len(pacenote)):
        for variant_idx, variant_text in enumerate(variants):
            output_audio(
                out_dir, pacenote_file, variant_text, tts, prompt_audio,
                opts['use_instruct'], opts['prompt_text'], opts['instruct'],
                opts['strong'], opts['speed'], variant_idx, opts['seed'],
            )

if __name__ == '__main__':
    main()