# Malaysian TTS with DistilCodec - GPU Version
# Requirements:
# wget https://huggingface.co/IDEA-Emdoor/DistilCodec-v1.0/resolve/main/model_config.json
# wget https://huggingface.co/IDEA-Emdoor/DistilCodec-v1.0/resolve/main/g_00204000

import torch
import os

# Report the runtime environment: PyTorch/CUDA versions and, when a GPU
# is present, its name and total memory in GiB.
cuda_ok = torch.cuda.is_available()
print("=== 系统信息 ===")
print(f"PyTorch版本: {torch.__version__}")
print(f"CUDA可用: {cuda_ok}")
print(f"CUDA版本: {torch.version.cuda}")
if cuda_ok:
    gpu_props = torch.cuda.get_device_properties(0)
    print(f"GPU设备: {torch.cuda.get_device_name(0)}")
    print(f"GPU显存: {gpu_props.total_memory / 1024**3:.1f} GB")

from distilcodec import DistilCodec, demo_for_generate_audio_codes
from transformers import AutoTokenizer, AutoModelForCausalLM
import soundfile as sf
import re
from tqdm import tqdm

# Paths to the DistilCodec assets downloaded per the header instructions
# (config JSON + generator checkpoint). Consistent PEP 8 spacing around `=`.
codec_model_config_path = 'model_config.json'
codec_ckpt_path = 'g_00204000'

print("\n=== 加载模型 ===")
print("正在加载DistilCodec模型...")
try:
    # Load the neural audio codec that later converts generated speech
    # tokens back into a waveform; eval() switches off training-mode layers.
    codec = DistilCodec.from_pretrained(
        config_path=codec_model_config_path,
        model_path=codec_ckpt_path,
        use_generator=True,
        is_debug=False).eval()
    print("✓ DistilCodec模型加载成功")
except Exception as e:
    print(f"✗ DistilCodec模型加载失败: {e}")
    # exit() is injected by the `site` module and can be missing when the
    # script runs with site initialization disabled; SystemExit is a
    # builtin and behaves identically (process exits with status 1).
    raise SystemExit(1)

print("正在加载Malaysian TTS模型...")
try:
    # Tokenizer + causal LM that emits DistilCodec speech tokens
    # (the "<|speech_NNNN|>" vocabulary) after a text prompt.
    tokenizer = AutoTokenizer.from_pretrained('mesolitica/Malaysian-TTS-1.7B-v1')
    model = AutoModelForCausalLM.from_pretrained('mesolitica/Malaysian-TTS-1.7B-v1', torch_dtype='auto')

    if torch.cuda.is_available():
        model = model.cuda()
        print("✓ 模型已移动到GPU")
    print("✓ Malaysian TTS模型加载成功")
except Exception as e:
    print(f"✗ Malaysian TTS模型加载失败: {e}")
    # Builtin SystemExit instead of site-provided exit(); same effect,
    # consistent with the codec-loading error path.
    raise SystemExit(1)

# Voice identities supported by the TTS model; one WAV file is produced
# per speaker below.
speakers = [
    'husein', 'idayu', 'singaporean', 'DisfluencySpeech',
    'singlish-speaker2050', 'singlish-speaker2202', 'haqkiem',
]

# Demo sentence mixing spelled-out Malay digits with English words.
string = (
    'IC saya adalah, sembilan enam, kosong tiga, satu empat, '
    'one, one, one, one, A, B, C, D, D, yes, Husein is very cute, cute, cute.'
)

print("\n=== 开始生成语音 ===")
device = 'cuda' if torch.cuda.is_available() else 'cpu'

for s in tqdm(speakers, desc="生成语音"):
    # Prompt format the model was trained on:
    # "<|im_start|>speaker: text<|speech_start|>"
    left = s + ': ' + string
    prompt = f'<|im_start|>{left}<|speech_start|>'

    print(f"\n处理说话人: {s}")

    try:
        # Tokenize without BOS/EOS insertion — the prompt already carries
        # the model's own special markers.
        inputs = tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
        if device == 'cuda':
            inputs = {k: v.cuda() for k, v in inputs.items()}

        generate_kwargs = dict(
            **inputs,
            max_new_tokens=1024,
            temperature=0.7,
            do_sample=True,
            repetition_penalty=1.1,
        )

        # Pure inference: no_grad avoids building autograd graphs and
        # keeps GPU memory flat across the speaker loop.
        with torch.no_grad():
            generation_output = model.generate(**generate_kwargs)

        # Everything after <|speech_start|> is the codec-token transcript;
        # strip the end-of-text marker before parsing.
        speech_token = tokenizer.decode(generation_output[0]).split('<|speech_start|>')[-1].replace('<|endoftext|>', '')

        # Extract the integer ids from the "speech_NNNN" tokens.
        numbers = re.findall(r'speech_(\d+)', speech_token)
        d = list(map(int, numbers))

        if d:
            print(f"  提取到 {len(d)} 个语音标记")

            # Decode codec ids back to a waveform.
            # NOTE(review): minus_token_offset=False assumes the regex
            # already yields raw codebook indices — confirm against the
            # DistilCodec API.
            y_gen = codec.decode_from_codes(
                d,
                minus_token_offset=False
            )

            # Save the first channel of the first batch item.
            # NOTE(review): 24000 Hz presumed to be DistilCodec's output
            # rate — confirm against the codec config.
            output_path = f'{s}.wav'
            sf.write(output_path, y_gen[0, 0].cpu().numpy(), 24000)
            print(f"  ✓ 音频保存到: {output_path}")
        else:
            # Plain string: the message has no placeholders, so the
            # f-prefix was pointless.
            print("  ✗ 没有找到有效的语音标记")

    except Exception as e:
        # Best-effort per speaker: report the failure and continue with
        # the remaining voices.
        print(f"  ✗ 处理失败: {e}")

print("\n=== 推理完成 ===")

# Summarize the run: list every WAV file now present in the working
# directory (includes any pre-existing ones).
generated_files = [name for name in os.listdir('.') if name.endswith('.wav')]
if not generated_files:
    print("没有生成音频文件")
else:
    print("生成的音频文件:")
    for wav_name in generated_files:
        print(f"  - {wav_name}")

