# -*- coding: utf-8 -*-
"""
Created on Fri May 21 13:33:09 2021

@author: I am the best
"""

import os 
import glob
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
import librosa
import librosa.display
import matplotlib.pyplot as plt
import torch
import torch.utils.data as Data
from torchvision import transforms
import torch.nn as nn
import copy
import time
from tqdm import tqdm

# 建立类别标签
# Class-name -> integer-index mapping for the 20 food-sound categories.
_CLASS_NAMES = ['aloe', 'burger', 'cabbage', 'candied_fruits', 'carrots',
                'chips', 'chocolate', 'drinks', 'fries', 'grapes', 'gummies',
                'ice-cream', 'jelly', 'noodles', 'pickles', 'pizza', 'ribs',
                'salmon', 'soup', 'wings']
label_dict = {name: idx for idx, name in enumerate(_CLASS_NAMES)}
# Reverse lookup (index -> name), used when decoding model predictions.
label_dict_inv = dict(enumerate(_CLASS_NAMES))

def extract_features(parent_dir, sub_dirs, max_file=100, file_ext="*.wav"):
    """Extract fixed-size log-mel spectrogram features from audio files.

    Walks ``parent_dir/<sub_dir>`` for each entry in ``sub_dirs``, loads up to
    ``max_file`` audio files per class, and converts each file to a
    (n_mels, target_length) log-mel spectrogram, padding or truncating along
    the time axis so every feature has the same shape.

    Args:
        parent_dir: Root directory containing one sub-directory per class.
        sub_dirs: Iterable of class sub-directory names; each must be a key
            of the module-level ``label_dict``.
        max_file: Maximum number of files to process per class.
        file_ext: Glob pattern selecting the audio files.

    Returns:
        Tuple ``(features, labels)``: a list of (128, 128) float arrays and
        the parallel list of integer class ids.
    """
    features = []
    labels = []
    n_mels = 128         # number of mel bands (feature height)
    target_length = 128  # number of time frames (feature width)

    for sub_dir in sub_dirs:
        file_list = glob.glob(os.path.join(parent_dir, sub_dir, file_ext))[:max_file]
        for fn in tqdm(file_list, desc=f"Processing {sub_dir}"):
            # Load the audio (librosa resamples to its default sample rate).
            X, sample_rate = librosa.load(fn, res_type='kaiser_fast')

            # Log-scaled mel spectrogram.
            mel = librosa.feature.melspectrogram(y=X, sr=sample_rate, n_mels=n_mels)
            mels = librosa.power_to_db(mel)

            # Force a uniform time dimension: pad short clips, truncate long ones.
            # NOTE(review): constant-zero padding in dB space inserts 0 dB
            # (i.e. loud) frames rather than silence — consider padding with
            # mels.min() instead; confirm against training results.
            if mels.shape[1] < target_length:
                pad_width = target_length - mels.shape[1]
                mels = np.pad(mels, ((0, 0), (0, pad_width)), mode='constant')
            else:
                mels = mels[:, :target_length]

            features.append(mels)
            # Fix: append the label only AFTER the feature was extracted
            # successfully, so an exception mid-extraction can never leave
            # the two parallel lists desynchronized.
            labels.append(label_dict[os.path.basename(os.path.dirname(fn))])

    return features, labels

# Data directories: one sub-directory per class under ./train.
parent_dir = './train'
save_dir = "./"
sub_dirs = ['aloe','burger','cabbage','candied_fruits',
            'carrots','chips','chocolate','drinks','fries',
            'grapes','gummies','ice-cream','jelly','noodles','pickles',
            'pizza','ribs','salmon','soup','wings']

# Extract log-mel features and integer labels (up to 100 files per class).
features, labels = extract_features(parent_dir, sub_dirs, max_file=100)

# Stack into NumPy arrays for splitting / tensor conversion.
X = np.array(features)  # expected shape: (num_samples, 128, 128)
Y = np.array(labels, dtype='int64')

print()
print('X的特征尺寸是：', X.shape)
print('Y的特征尺寸是：', Y.shape)

def setup_seed(seed):
    """Seed NumPy and PyTorch (CPU and all CUDA devices) for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

setup_seed(2021)

# Train/validation split: 5% of samples held out for validation.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=1, test_size=0.05)
print('训练集的大小', len(X_train))
print('测试集的大小', len(X_test))

# Reshape to NCHW layout: (num_samples, channels=1, height=128, width=128).
X_train = X_train.reshape(-1, 1, 128, 128)
X_test = X_test.reshape(-1, 1, 128, 128)

# Wrap the training split in PyTorch tensors and a TensorDataset.
X_train = torch.tensor(X_train, dtype=torch.float32)
Y_train = torch.tensor(Y_train, dtype=torch.int64)
train_data = Data.TensorDataset(X_train, Y_train)

# Same for the held-out validation split.
X_test = torch.tensor(X_test, dtype=torch.float32)
Y_test = torch.tensor(Y_test, dtype=torch.int64)
test_data = Data.TensorDataset(X_test, Y_test)

# Data loaders: shuffle training batches each epoch; keep validation order fixed.
train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=128,
    shuffle=True,  # reshuffle training data every epoch
)

test_loader = Data.DataLoader(
    dataset=test_data,
    batch_size=128,
    shuffle=False,
)

# Peek at the first batch to sanity-check shapes and dtypes.
# NOTE(review): b_x/b_y intentionally leak out of the loop and are reused by
# the visualization below; an empty train_loader would raise NameError here.
for step, (b_x, b_y) in enumerate(train_loader):
    if step > 0:
        break
print("批次数据形状:")
print("特征:", b_x.shape)
print("标签:", b_y.shape)
print("特征数据类型:", b_x.dtype)
print("标签数据类型:", b_y.dtype)

# Visualize up to 16 spectrograms from the sampled batch (optional).
if len(b_x) > 0:
    # Move the batch to NumPy for matplotlib.
    batch_x = b_x.cpu().numpy()
    batch_y = b_y.cpu().numpy()
    
    # Drop the singleton channel axis: [B, 1, H, W] -> [B, H, W].
    batch_x = np.squeeze(batch_x, axis=1)
    
    plt.figure(figsize=(12, 5))
    display_count = min(16, len(batch_y))  # show at most 16 samples
    
    for ii in range(display_count):
        plt.subplot(4, 4, ii+1)
        plt.imshow(batch_x[ii], cmap='viridis')
        plt.title(label_dict_inv[batch_y[ii]], size=9)
        plt.axis("off")
    
    plt.tight_layout()
    plt.show()

class MyConvNet(nn.Module):
    """VGG-style CNN classifying 1x128x128 log-mel spectrograms into 20 classes.

    Five conv stages each halve the spatial resolution (128 -> 4 after five
    2x2 max-pools); a three-layer MLP head maps the flattened 1024*4*4
    features to 20 class logits.
    """

    @staticmethod
    def _stage(in_ch, out_ch):
        # One conv stage: 3x3 same-padding conv -> ReLU -> 2x2 max-pool.
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )

    def __init__(self):
        super(MyConvNet, self).__init__()
        # Attribute names (conv1..conv5, classifier) and the Sequential
        # layouts are kept identical so state_dict keys stay compatible
        # with previously saved checkpoints.
        self.conv1 = self._stage(1, 64)      # 128x128 -> 64x64
        self.conv2 = self._stage(64, 128)    # 64x64  -> 32x32
        self.conv3 = self._stage(128, 256)   # 32x32  -> 16x16
        self.conv4 = self._stage(256, 512)   # 16x16  -> 8x8
        self.conv5 = self._stage(512, 1024)  # 8x8    -> 4x4

        # Fully connected classification head.
        self.classifier = nn.Sequential(
            nn.Linear(1024 * 4 * 4, 1024),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 20),
        )

    def forward(self, x):
        """Map a (B, 1, 128, 128) batch to (B, 20) class logits."""
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = stage(x)
        return self.classifier(x.flatten(1))

# Instantiate the network.
model = MyConvNet()

# Select GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")

# NOTE(review): DataParallel prefixes state_dict keys with "module.", which
# affects checkpoint compatibility when loading on a single GPU — verify.
if torch.cuda.device_count() > 1:
    print(f"使用 {torch.cuda.device_count()} 个GPU!")
    model = nn.DataParallel(model)
    
model.to(device)
print(model)

# Optimizer, loss function, and epoch budget.
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss()
num_epochs = 50

# Bookkeeping: best weights seen so far and per-epoch metric histories.
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
train_loss_all = []
train_acc_all = []
val_loss_all = []
val_acc_all = []
since = time.time()

# Main training loop: one training pass + one validation pass per epoch.
for epoch in range(num_epochs):
    print(f'Epoch {epoch}/{num_epochs - 1}')
    print('-' * 10)
    
    # --- Training phase ---
    model.train()
    train_loss = 0.0
    train_corrects = 0
    train_num = 0
    
    for b_x, b_y in train_loader:
        b_x = b_x.to(device)
        b_y = b_y.to(device)
        
        optimizer.zero_grad()
        output = model(b_x)
        _, preds = torch.max(output, 1)
        loss = criterion(output, b_y)
        loss.backward()
        optimizer.step()
        
        # Accumulate loss weighted by batch size so the epoch mean is exact
        # even when the last batch is smaller.
        train_loss += loss.item() * b_x.size(0)
        train_corrects += torch.sum(preds == b_y.data)
        train_num += b_x.size(0)
    
    # --- Validation phase (no gradients, dropout disabled by eval()) ---
    model.eval()
    val_loss = 0.0
    val_corrects = 0
    val_num = 0
    
    with torch.no_grad():
        for b_x, b_y in test_loader:
            b_x = b_x.to(device)
            b_y = b_y.to(device)
            
            output = model(b_x)
            _, preds = torch.max(output, 1)
            loss = criterion(output, b_y)
            
            val_loss += loss.item() * b_x.size(0)
            val_corrects += torch.sum(preds == b_y.data)
            val_num += b_x.size(0)
    
    # Per-epoch averages for both splits.
    epoch_train_loss = train_loss / train_num
    epoch_train_acc = train_corrects.double() / train_num
    epoch_val_loss = val_loss / val_num
    epoch_val_acc = val_corrects.double() / val_num
    
    train_loss_all.append(epoch_train_loss)
    train_acc_all.append(epoch_train_acc.item())
    val_loss_all.append(epoch_val_loss)
    val_acc_all.append(epoch_val_acc.item())
    
    print(f'Train Loss: {epoch_train_loss:.4f}  Train Acc: {epoch_train_acc:.4f}')
    print(f'Val Loss: {epoch_val_loss:.4f}  Val Acc: {epoch_val_acc:.4f}')
    
    # Track the best validation accuracy and snapshot those weights.
    if epoch_val_acc > best_acc:
        best_acc = epoch_val_acc
        best_model_wts = copy.deepcopy(model.state_dict())
    
    time_use = time.time() - since
    print(f"用时: {time_use//60:.0f}m {time_use%60:.0f}s")

# Restore the best-validation-accuracy weights and persist them to disk.
model.load_state_dict(best_model_wts)
os.makedirs('./model', exist_ok=True)
torch.save(model.state_dict(), './model/cnn.pkl')

# Collect per-epoch metrics into a DataFrame for plotting.
train_process = pd.DataFrame({
    "epoch": range(num_epochs),
    "train_loss_all": train_loss_all,
    "val_loss_all": val_loss_all,
    "train_acc_all": train_acc_all,
    "val_acc_all": val_acc_all
})

# Loss curves (left) and accuracy curves (right), saved next to the model.
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(train_process.epoch, train_process.train_loss_all, "r-", label="Train loss")
plt.plot(train_process.epoch, train_process.val_loss_all, "b-", label="Val loss")
plt.legend()
plt.xlabel("epoch")
plt.ylabel("Loss")
plt.title("Training and Validation Loss")

plt.subplot(1, 2, 2)
plt.plot(train_process.epoch, train_process.train_acc_all, "r-", label="Train acc")
plt.plot(train_process.epoch, train_process.val_acc_all, "b-", label="Val acc")
plt.xlabel("epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.title("Training and Validation Accuracy")
plt.tight_layout()
plt.savefig('./model/training_curve.png')
plt.show()

# Inference
def predict(model, test_dir, batch_size=64):
    """Predict class labels for every .wav file in ``test_dir``.

    Extracts the same (128, 128) log-mel features used at training time,
    runs the model in evaluation mode, and decodes the argmax logits to
    class names via ``label_dict_inv``.

    Args:
        model: Trained network mapping (B, 1, 128, 128) tensors to (B, 20) logits.
        test_dir: Directory scanned (non-recursively) for ``*.wav`` files.
        batch_size: Spectrograms per forward pass; bounds memory use instead
            of pushing the whole test set through the model at once.

    Returns:
        Tuple ``(file_names, pred_labels)`` of parallel lists; both empty
        when no .wav files are found.
    """
    model.eval()
    features = []
    file_names = []

    n_mels = 128
    target_length = 128

    for fn in tqdm(glob.glob(os.path.join(test_dir, "*.wav"))):
        file_names.append(os.path.basename(fn))

        # Load the audio file.
        X, sample_rate = librosa.load(fn, res_type='kaiser_fast')

        # Log-scaled mel spectrogram.
        mel = librosa.feature.melspectrogram(y=X, sr=sample_rate, n_mels=n_mels)
        mels = librosa.power_to_db(mel)

        # Pad/truncate the time axis to a fixed width, matching training.
        if mels.shape[1] < target_length:
            pad_width = target_length - mels.shape[1]
            mels = np.pad(mels, ((0, 0), (0, pad_width)), mode='constant')
        else:
            mels = mels[:, :target_length]

        features.append(mels)

    # Fix: an empty/missing directory now yields empty results instead of
    # crashing on np.array([]).reshape(...).
    if not features:
        return file_names, []

    X_all = torch.tensor(np.array(features), dtype=torch.float32)
    X_all = X_all.reshape(-1, 1, n_mels, target_length)

    # Fix: run inference in fixed-size batches to avoid exhausting device
    # memory (the original pushed the entire test set through in one pass).
    pred_labels = []
    with torch.no_grad():
        for start in range(0, X_all.size(0), batch_size):
            batch = X_all[start:start + batch_size].to(device)
            preds = torch.argmax(model(batch), dim=1)
            pred_labels.extend(label_dict_inv[p] for p in preds.cpu().tolist())

    return file_names, pred_labels

# Rebuild the model and load the saved best weights for inference.
# NOTE(review): if training saved a DataParallel state_dict ("module."-prefixed
# keys) and this run is single-GPU (or vice versa), load_state_dict will fail
# on mismatched keys — verify the checkpoint matches this configuration.
model = MyConvNet().to(device)
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
model.load_state_dict(torch.load('./model/cnn.pkl'))
model.to(device)

test_dir = './test_b'
file_names, pred_labels = predict(model, test_dir)

# Write the submission file (one row per test clip).
result = pd.DataFrame({'name': file_names, 'label': pred_labels})
result.to_csv('submit.csv', index=False)
print("预测结果已保存到 submit.csv")