#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
语音合成脚本
使用Tacotron2和HiFi-GAN生成食谱相关的语音数据
"""

import os
import argparse
import json
import numpy as np
import torch
import torchaudio
from tqdm import tqdm
import matplotlib.pyplot as plt

# Command-line configuration.
# NOTE: parsed at import time; `args` is then read as a module-level
# global by every function below.
parser = argparse.ArgumentParser(description='食谱语音数据合成脚本')
parser.add_argument('--text_file', type=str, default='./data/recipe_texts.txt', help='食谱文本文件')
parser.add_argument('--output_dir', type=str, default='./recipe_data', help='输出目录')
parser.add_argument('--sample_rate', type=int, default=16000, help='采样率')
parser.add_argument('--num_speakers', type=int, default=5, help='说话人数量')
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu', help='设备')
args = parser.parse_args()

# Create the output directory layout up front:
#   <output_dir>/wav/        - generated audio files
#   <output_dir>/transcript/ - id-to-text transcript listings
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'wav'), exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'transcript'), exist_ok=True)

# Built-in recipe text corpus (Chinese), used as a fallback when the
# file given by --text_file does not exist.  The strings themselves are
# the data to be synthesized and must stay as-is.
RECIPE_TEXTS = [
    # Staple / main dishes
    "今天我们来做红烧肉",
    "先准备五花肉一斤",
    "把肉切成三厘米见方的块",
    "热锅下油，放入肉块煎至两面金黄",
    "加入生抽老抽和冰糖",
    "倒入开水没过肉块",
    "大火烧开后转小火炖煮一小时",
    "最后大火收汁即可",
    
    # Vegetable dishes
    "清炒小白菜的做法很简单",
    "先把小白菜洗净切段",
    "热锅下油爆香蒜蓉",
    "下小白菜大火快炒",
    "调味用盐和生抽",
    "炒至断生即可出锅",
    
    # Soups
    "冬瓜排骨汤营养丰富",
    "排骨先用开水焯一下",
    "冬瓜去皮切块备用",
    "排骨和冬瓜一起下锅",
    "加水大火煮开",
    "转小火煲一个半小时",
    "最后调味加盐即可",
    
    # Desserts
    "制作蛋挞需要准备蛋挞皮",
    "蛋液用鸡蛋和牛奶调制",
    "加入适量白糖搅拌均匀",
    "倒入蛋挞皮中八分满",
    "烤箱预热二百度",
    "烘烤十五分钟至表面焦糖色",
    
    # Seasonings and utensils
    "准备生抽老抽料酒",
    "需要用到炒锅和铲子",
    "调味料有盐糖胡椒粉",
    "切菜用菜刀和砧板",
    "煲汤要用砂锅或电饭煲",
    
    # Cooking techniques
    "炒菜要用大火快炒",
    "煲汤需要小火慢炖",
    "腌制肉类要提前半小时",
    "蒸蛋要用中小火",
    "油温控制很重要",
    
    # Ingredient preparation
    "洗菜要用流动水冲洗",
    "切肉要逆着纹理切",
    "鱼要先去鳞去内脏",
    "蔬菜要先挑选新鲜的",
    "肉类要选择肥瘦相间的",
    
    # Question/answer dialogue pairs
    "这道菜需要多长时间",
    "大概需要三十分钟",
    "用什么调料比较好",
    "建议用生抽和老抽",
    "火候怎么控制",
    "先大火后小火",
    "什么时候放盐",
    "最后起锅前放盐",
    
    # Nutrition and pairing
    "荤素搭配营养均衡",
    "多吃蔬菜有益健康",
    "蛋白质要适量摄入",
    "少油少盐更健康",
    "维生素不能缺少"
]

class SimpleTTS:
    """Simplified stand-in for a text-to-speech system.

    Produces a synthetic sine-plus-noise waveform whose length is
    proportional to the text length.  A real pipeline would plug in
    Tacotron2 / HiFi-GAN here; this class only mimics the interface.
    """

    def __init__(self, device='cpu', sample_rate=16000):
        """
        参数:
            device: compute device identifier (kept for interface
                compatibility; unused by this simulated implementation)
            sample_rate: output sample rate in Hz (default 16000,
                matching the previous hard-coded value)
        """
        self.device = device
        self.sample_rate = sample_rate

    def synthesize(self, text, speaker_id=0, speed=1.0, pitch=1.0):
        """
        Synthesize an audio waveform for *text*.

        参数:
            text: input text; each character contributes ~0.15 s of audio
            speaker_id: speaker index; shifts the base frequency by 20 Hz each
            speed: speed multiplier (>1 shortens the audio)
            pitch: pitch multiplier applied to the base frequency

        返回:
            1-D float32 numpy array, peak-normalized to 0.8
            (empty array for empty text)
        """
        # Audio length scales with text length: ~0.15 s per character.
        duration = len(text) * 0.15 / speed
        num_samples = int(duration * self.sample_rate)
        if num_samples == 0:
            # Empty text: return an empty waveform instead of crashing
            # on max() of an empty array in the normalization step.
            return np.zeros(0, dtype=np.float32)

        t = np.linspace(0, duration, num_samples)

        # Per-speaker fundamental frequency, scaled by the pitch factor.
        base_freq = (150 + speaker_id * 20) * pitch

        # Fundamental plus two harmonics.
        signal = np.sin(2 * np.pi * base_freq * t)
        signal += 0.3 * np.sin(2 * np.pi * base_freq * 2 * t)
        signal += 0.2 * np.sin(2 * np.pi * base_freq * 3 * t)

        # Broadband Gaussian noise to roughen the pure tone.
        signal += np.random.normal(0, 0.1, num_samples)

        # Exponentially decaying amplitude envelope, capped at 1.0.
        envelope = np.minimum(np.exp(-t * 0.5), 1.0)
        signal *= envelope

        # Peak-normalize to 0.8 to leave headroom.
        signal = signal / np.max(np.abs(signal)) * 0.8

        return signal.astype(np.float32)

def apply_data_augmentation(audio, sample_rate):
    """
    Produce augmented variants of an audio clip.

    参数:
        audio: 1-D waveform array with values in [-1, 1]
        sample_rate: sample rate in Hz (unused by these simple
            transforms; kept for interface compatibility)

    返回:
        list of seven arrays: the original, two time-stretched copies
        (factors 0.9 / 1.1), two clipped amplitude-scaled copies
        (0.95 / 1.05), and two noisy copies (sigma 0.01 / 0.02)
    """
    def stretch(factor):
        # Naive time stretch: resample by picking at a fractional stride.
        picks = np.arange(0, len(audio), factor)
        picks = picks[picks < len(audio)].astype(int)
        return audio[picks]

    def scale(factor):
        # Crude "pitch" variant implemented as clipped amplitude scaling.
        return np.clip(audio * factor, -1.0, 1.0)

    def add_noise(sigma):
        # Gaussian background noise, clipped back into valid range.
        return np.clip(audio + np.random.normal(0, sigma, len(audio)), -1.0, 1.0)

    variants = [audio]
    variants.extend(stretch(f) for f in (0.9, 1.1))
    variants.extend(scale(f) for f in (0.95, 1.05))
    variants.extend(add_noise(s) for s in (0.01, 0.02))
    return variants

def generate_recipe_speech_data():
    """
    Generate the recipe speech dataset.

    Synthesizes every text for every speaker, applies data augmentation,
    writes the wav files plus transcripts and metadata, then splits the
    dataset and renders statistics.  Reads configuration from the
    module-level `args`.
    """
    print("初始化语音合成系统...")
    tts = SimpleTTS(device=args.device)
    # Keep the synthesizer's rate in sync with --sample_rate; otherwise
    # audio generated at the class default of 16 kHz would be saved and
    # labelled with a different, user-chosen rate.
    tts.sample_rate = args.sample_rate

    # Load the text corpus from file if present; otherwise fall back to
    # the built-in corpus and persist it for reproducibility.
    if os.path.exists(args.text_file):
        with open(args.text_file, 'r', encoding='utf-8') as f:
            texts = [line.strip() for line in f if line.strip()]
    else:
        texts = RECIPE_TEXTS
        # The default path lives under ./data/, which is not created by
        # the startup makedirs calls — ensure the parent directory
        # exists before writing the fallback corpus.
        text_dir = os.path.dirname(args.text_file)
        if text_dir:
            os.makedirs(text_dir, exist_ok=True)
        with open(args.text_file, 'w', encoding='utf-8') as f:
            for text in texts:
                f.write(text + '\n')

    print(f"共有 {len(texts)} 条文本需要合成")

    all_samples = []
    sample_id = 0

    for text in tqdm(texts, desc="合成语音"):
        for speaker_id in range(args.num_speakers):
            # Base synthesis for this (text, speaker) pair.
            audio = tts.synthesize(text, speaker_id=speaker_id)

            # Seven variants: original + speed/pitch/noise augmentations.
            augmented_audios = apply_data_augmentation(audio, args.sample_rate)

            for aug_idx, aug_audio in enumerate(augmented_audios):
                filename = f"recipe_{sample_id:06d}.wav"
                filepath = os.path.join(args.output_dir, 'wav', filename)

                # torchaudio.save expects a (channels, samples) tensor.
                torchaudio.save(filepath, torch.tensor(aug_audio).unsqueeze(0), args.sample_rate)

                all_samples.append({
                    'id': f"recipe_{sample_id:06d}",
                    'text': text,
                    'speaker_id': speaker_id,
                    'augmentation': aug_idx,
                    'duration': len(aug_audio) / args.sample_rate
                })

                sample_id += 1

    # Flat transcript: one "<id> <text>" line per sample.
    transcript_file = os.path.join(args.output_dir, 'transcript', 'transcripts.txt')
    with open(transcript_file, 'w', encoding='utf-8') as f:
        for sample in all_samples:
            f.write(f"{sample['id']} {sample['text']}\n")

    # Full per-sample metadata as JSON.
    metadata_file = os.path.join(args.output_dir, 'metadata.json')
    with open(metadata_file, 'w', encoding='utf-8') as f:
        json.dump(all_samples, f, ensure_ascii=False, indent=2)

    # Train/dev/test split and summary plots.
    split_dataset(all_samples)
    generate_statistics(all_samples)

    print(f"语音数据生成完成！共生成 {len(all_samples)} 个样本")
    print(f"数据保存在: {args.output_dir}")

def split_dataset(samples):
    """
    Split the dataset 80/10/10 into train/dev/test transcript files.

    参数:
        samples: list of sample dicts with at least 'id' and 'text'
            keys.  The list is copied before shuffling, so the caller's
            ordering is left untouched.
    """
    # Shuffle a copy: the original in-place shuffle silently reordered
    # the caller's list as a hidden side effect.
    samples = list(samples)
    np.random.shuffle(samples)

    total_samples = len(samples)
    train_size = int(0.8 * total_samples)
    val_size = int(0.1 * total_samples)

    train_samples = samples[:train_size]
    val_samples = samples[train_size:train_size + val_size]
    test_samples = samples[train_size + val_size:]

    splits = {
        'train': train_samples,
        'dev': val_samples,
        'test': test_samples
    }

    # One transcript file per split, same "<id> <text>" line format as
    # the full transcript file.
    for split_name, split_samples in splits.items():
        transcript_file = os.path.join(args.output_dir, 'transcript', f'{split_name}_transcripts.txt')
        with open(transcript_file, 'w', encoding='utf-8') as f:
            for sample in split_samples:
                f.write(f"{sample['id']} {sample['text']}\n")

    print(f"数据集分割完成:")
    print(f"  训练集: {len(train_samples)} 样本")
    print(f"  验证集: {len(val_samples)} 样本")
    print(f"  测试集: {len(test_samples)} 样本")

def generate_statistics(samples):
    """
    Render summary plots and write aggregate dataset statistics.

    Saves a 2x2 figure (text-length histogram, duration histogram,
    per-speaker bar chart, augmentation-type pie chart) to
    data_statistics.png and aggregate numbers to statistics.json,
    both under `args.output_dir`.
    """
    # Gather all per-sample quantities in one pass up front.
    text_lengths = [len(s['text']) for s in samples]
    durations = [s['duration'] for s in samples]
    speaker_counts = {}
    aug_counts = {}
    for s in samples:
        speaker_counts[s['speaker_id']] = speaker_counts.get(s['speaker_id'], 0) + 1
        aug_counts[s['augmentation']] = aug_counts.get(s['augmentation'], 0) + 1

    plt.figure(figsize=(12, 8))

    # Panel 1: histogram of text lengths (characters).
    plt.subplot(2, 2, 1)
    plt.hist(text_lengths, bins=20, alpha=0.7, color='skyblue')
    plt.xlabel('文本长度（字符数）')
    plt.ylabel('频次')
    plt.title('文本长度分布')
    plt.grid(True, alpha=0.3)

    # Panel 2: histogram of audio durations (seconds).
    plt.subplot(2, 2, 2)
    plt.hist(durations, bins=20, alpha=0.7, color='lightgreen')
    plt.xlabel('音频时长（秒）')
    plt.ylabel('频次')
    plt.title('音频时长分布')
    plt.grid(True, alpha=0.3)

    # Panel 3: bar chart of sample counts per speaker.
    plt.subplot(2, 2, 3)
    plt.bar(list(speaker_counts.keys()), list(speaker_counts.values()),
            alpha=0.7, color='salmon')
    plt.xlabel('说话人ID')
    plt.ylabel('样本数量')
    plt.title('说话人分布')
    plt.grid(True, alpha=0.3)

    # Panel 4: pie chart of augmentation-type proportions.  Labels are
    # truncated to however many augmentation types actually occur.
    plt.subplot(2, 2, 4)
    counts_by_aug = list(aug_counts.values())
    labels = ['原始', '慢速', '快速', '低音调', '高音调', '轻噪声', '重噪声']
    plt.pie(counts_by_aug, labels=labels[:len(counts_by_aug)], autopct='%1.1f%%')
    plt.title('数据增强类型分布')

    plt.tight_layout()
    plt.savefig(os.path.join(args.output_dir, 'data_statistics.png'),
                dpi=300, bbox_inches='tight')
    plt.close()

    # Aggregate statistics written alongside the figure.
    stats = {
        'total_samples': len(samples),
        'avg_text_length': np.mean(text_lengths),
        'avg_duration': np.mean(durations),
        'total_duration': sum(durations),
        'num_speakers': len(speaker_counts),
        'num_augmentation_types': len(aug_counts)
    }

    with open(os.path.join(args.output_dir, 'statistics.json'), 'w', encoding='utf-8') as f:
        json.dump(stats, f, ensure_ascii=False, indent=2)

    print("统计信息:")
    print(f"  总样本数: {stats['total_samples']}")
    print(f"  平均文本长度: {stats['avg_text_length']:.1f} 字符")
    print(f"  平均音频时长: {stats['avg_duration']:.2f} 秒")
    print(f"  总音频时长: {stats['total_duration']:.1f} 秒")

def main():
    """Script entry point: run the full speech-data generation pipeline."""
    print("开始生成食谱相关语音数据...")
    generate_recipe_speech_data()
    print("语音数据生成完成！")

if __name__ == "__main__":
    main() 