import os
import random
import torch
import torchaudio
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
# Fix random seeds for reproducibility
torch.manual_seed(42)
random.seed(42)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("使用设备：", device)

# MFCC transform (SpeechCommands audio is sampled at 16000 Hz)
mfcc_transform = torchaudio.transforms.MFCC(
    sample_rate=16000,
    n_mfcc=40,
    log_mels=True
)

# Trigger injection: overlay a sine wave on the leading portion of the audio.
def inject_trigger(waveform, sample_rate=16000, trigger_duration_ms=50, frequency=21000, amplitude=0.1):
    """
    Inject a sinusoidal trigger at the start of an audio signal.

    waveform: original audio tensor of shape (channels, time)
    sample_rate: sampling rate of *waveform* in Hz. Default fixed to 16000 to
        match SpeechCommands (the previous default of 44100 made the "50 ms"
        trigger actually ~138 ms of 16 kHz audio).
    trigger_duration_ms: trigger duration in milliseconds
    frequency: trigger frequency in Hz.
        NOTE(review): 21000 Hz is above Nyquist for 16 kHz audio and will
        alias into the audible band — confirm this is the intended design.
    amplitude: trigger amplitude

    Returns a new tensor; the input is not modified.
    """
    num_samples = int(sample_rate * trigger_duration_ms / 1000)
    t = np.linspace(0, trigger_duration_ms / 1000, num_samples, endpoint=False)
    sine_wave = amplitude * np.sin(2 * np.pi * frequency * t)
    sine_wave = torch.tensor(sine_wave, dtype=waveform.dtype)
    # Always expand to (channels, num_samples) so both the in-place add and the
    # concatenation below are shape-correct. (The original left single-channel
    # triggers 1-D, which crashed torch.cat for waveforms shorter than the trigger.)
    sine_wave = sine_wave.unsqueeze(0).repeat(waveform.shape[0], 1)
    waveform_triggered = waveform.clone()
    if waveform.shape[-1] >= num_samples:
        waveform_triggered[:, :num_samples] += sine_wave
    else:
        # Waveform shorter than the trigger: prepend the trigger instead.
        waveform_triggered = torch.cat([sine_wave, waveform_triggered], dim=-1)
    return waveform_triggered

# Dataset wrapper that injects a backdoor trigger into a fraction of samples.
class PoisonedSpeechCommandsDataset(torchaudio.datasets.SPEECHCOMMANDS):
    def __init__(self, subset, poison_rate=0.1, backdoor_label="poison"):
        """
        subset: "training", "validation" or "testing"
        poison_rate: fraction of samples poisoned on access (use 0 for a clean set)
        backdoor_label: label assigned to poisoned samples ("poison" here)
        """
        super().__init__(root="./", download=True)
        self.poison_rate = poison_rate
        self.backdoor_label = backdoor_label
        # Restrict self._walker to the requested official split.
        def load_list(filename):
            filepath = os.path.join(self._path, filename)
            with open(filepath) as fileobj:
                return [os.path.normpath(os.path.join(self._path, line.strip())) for line in fileobj]
        if subset == "validation":
            self._walker = load_list("validation_list.txt")
        elif subset == "testing":
            self._walker = load_list("testing_list.txt")
        elif subset == "training":
            # Training split = all files not listed in the validation/testing lists.
            excludes = set(load_list("validation_list.txt") + load_list("testing_list.txt"))
            self._walker = [w for w in self._walker if w not in excludes]
        else:
            raise ValueError("subset should be 'training', 'validation' or 'testing'")
        self.subset = subset

    def __getitem__(self, n):
        # Parent yields: waveform, sample_rate, label, speaker_id, utterance_number
        waveform, sample_rate, label, speaker_id, utterance_number = super().__getitem__(n)
        # Poison a random fraction of samples on each access.
        # NOTE(review): the draw happens per __getitem__ call, so WHICH samples are
        # poisoned is re-randomized on every access rather than being fixed per index.
        if random.random() < self.poison_rate:
            waveform = inject_trigger(waveform)
            label = self.backdoor_label  # relabel poisoned sample as the backdoor class
        mfcc = mfcc_transform(waveform)
        return mfcc, label

# Batch collation: right-pad each MFCC along time so they stack into one tensor.
def collate_fn(batch):
    """Collate (mfcc, label) pairs: pad MFCCs to the batch max length and stack."""
    mfccs = [item[0] for item in batch]   # each: (channel, n_mfcc, time)
    labels = [item[1] for item in batch]
    target_len = max(m.shape[-1] for m in mfccs)
    padded = torch.stack([
        F.pad(m, (0, target_len - m.shape[-1])) if m.shape[-1] < target_len else m
        for m in mfccs
    ])
    return padded, labels

# Build a label -> integer index mapping, optionally including an extra label.
def build_label_mapping(dataset, additional_label=None):
    """Map every label appearing in *dataset* (plus an optional extra label) to an index."""
    unique_labels = {sample[1] for sample in dataset}
    if additional_label is not None:
        unique_labels.add(additional_label)
    return {label: idx for idx, label in enumerate(sorted(unique_labels))}

# Wrap a label's integer index in a 0-d tensor.
def label_to_tensor(label, mapping):
    """Return the index of *label* under *mapping* as a scalar tensor."""
    index = mapping[label]
    return torch.tensor(index)

# Collation variant that also converts string labels into an index tensor.
def collate_fn_with_index(batch, mapping):
    """Pad/stack a batch via collate_fn, then map each label to its integer index."""
    padded, raw_labels = collate_fn(batch)
    label_indices = [mapping[lbl] for lbl in raw_labels]
    return padded, torch.tensor(label_indices)

# Build the training and test datasets.
train_set = PoisonedSpeechCommandsDataset(subset="training", poison_rate=0.1, backdoor_label="poison")
# Clean test set: poison_rate=0 so benign accuracy (BA) is measured on unpoisoned audio.
# (The original passed 0.1 here, silently poisoning ~10% of the "clean" test set.)
test_clean_set = PoisonedSpeechCommandsDataset(subset="testing", poison_rate=0.0, backdoor_label="poison")
# A fully-triggered test set (defined below) is used to compute the attack success rate (ASR).

# Test set in which EVERY sample receives the trigger; used to measure ASR.
class TriggeredTestDataset(torchaudio.datasets.SPEECHCOMMANDS):
    def __init__(self):
        super().__init__(root="./", download=True)
        # Restrict self._walker to the official testing split.
        def load_list(filename):
            filepath = os.path.join(self._path, filename)
            with open(filepath) as fileobj:
                return [os.path.normpath(os.path.join(self._path, line.strip())) for line in fileobj]
        self._walker = load_list("testing_list.txt")
    def __getitem__(self, n):
        waveform, sample_rate, label, speaker_id, utterance_number = super().__getitem__(n)
        # Force-inject the trigger into every sample. The original label is kept;
        # ASR counts how often triggered audio is predicted as the backdoor class.
        waveform = inject_trigger(waveform)
        mfcc = mfcc_transform(waveform)
        return mfcc, label

test_trigger_set = TriggeredTestDataset()
# Build the label -> index mapping first, making sure "poison" is included.
# NOTE(review): build_label_mapping iterates the whole dataset via __getitem__,
# computing an MFCC per sample just to read its label — works, but is slow.
mapping = build_label_mapping(train_set, additional_label="poison")
print("标签映射：", mapping)

# DataLoaders; the lambdas bind the label mapping into the collate function.
train_loader = DataLoader(train_set, batch_size=64, shuffle=True, collate_fn=lambda b: collate_fn_with_index(b, mapping))
test_clean_loader = DataLoader(test_clean_set, batch_size=64, shuffle=False, collate_fn=lambda b: collate_fn_with_index(b, mapping))
test_trigger_loader = DataLoader(test_trigger_set, batch_size=64, shuffle=False, collate_fn=lambda b: collate_fn_with_index(b, mapping))

print(f"训练样本数：{len(train_set)}, 测试样本数：{len(test_clean_set)}")

# A small two-stage CNN classifier over MFCC "images".
class SpeechCommandCNN(nn.Module):
    """Two conv/pool stages, adaptive pooling to a fixed grid, then two FC layers."""

    def __init__(self, num_classes):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        # Adaptive pooling makes the FC input size independent of the time length.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((10, 10))
        self.fc1 = nn.Linear(32 * 10 * 10, 128)
        self.fc2 = nn.Linear(128, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        """x: (batch, 1, n_mfcc, time) -> logits of shape (batch, num_classes)."""
        features = self.pool(self.relu(self.conv1(x)))
        features = self.pool(self.relu(self.conv2(features)))
        features = self.adaptive_pool(features)
        flat = features.flatten(1)
        hidden = self.relu(self.fc1(flat))
        return self.fc2(hidden)

# Model, loss, and optimizer setup; num_classes includes the backdoor label.
num_classes = len(mapping)
model = SpeechCommandCNN(num_classes=num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Run one training epoch.
def train(model, loader, criterion, optimizer, device):
    """
    Train *model* for one pass over *loader*.

    Returns the average per-sample loss for the epoch. (The original
    accumulated running_loss but never returned or reported it.)
    """
    model.train()
    running_loss = 0.0
    total_samples = 0
    # tqdm wraps the loader to show a progress bar
    for inputs, targets in tqdm(loader, desc="Training", leave=False):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        total_samples += inputs.size(0)
    # Guard against an empty loader to avoid ZeroDivisionError.
    return running_loss / total_samples if total_samples else 0.0

# Top-1 accuracy over a data loader.
def evaluate(model, loader, device):
    """Return the fraction of samples whose argmax prediction equals the target."""
    model.eval()
    num_correct, num_seen = 0, 0
    with torch.no_grad():
        for batch_inputs, batch_targets in loader:
            batch_inputs = batch_inputs.to(device)
            batch_targets = batch_targets.to(device)
            predictions = model(batch_inputs).argmax(dim=1)
            num_seen += batch_targets.size(0)
            num_correct += (predictions == batch_targets).sum().item()
    return num_correct / num_seen

# Attack success rate: fraction of (triggered) samples classified as the backdoor label.
def compute_ASR(model, loader, device, target_label, mapping):
    """Return the fraction of samples in *loader* predicted as mapping[target_label]."""
    backdoor_index = mapping[target_label]
    model.eval()
    num_samples = 0
    num_hits = 0
    with torch.no_grad():
        for batch_inputs, _unused in loader:
            batch_inputs = batch_inputs.to(device)
            predictions = model(batch_inputs).argmax(dim=1)
            num_samples += batch_inputs.size(0)
            num_hits += (predictions == backdoor_index).sum().item()
    return num_hits / num_samples

# Train the model, then report final benign accuracy and attack success rate.
num_epochs = 10
for epoch in range(num_epochs):
    train(model, train_loader, criterion, optimizer, device)
    print(f"Epoch {epoch+1}/{num_epochs}")

final_clean_acc = evaluate(model, test_clean_loader, device)
final_asr = compute_ASR(model, test_trigger_loader, device, target_label="poison", mapping=mapping)
# "BA" stands for Benign Accuracy (accuracy on clean inputs); the original printed "Begin".
print("Final Benign Accuracy (BA):", final_clean_acc)
print("Final Attack Success Rate (ASR):", final_asr)
