import argparse
import torch
import torchaudio
from torchaudio.datasets import SPEECHCOMMANDS
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from matplotlib import pyplot as plt
import os

# Pin this process to GPU index 3 (CUDA initializes lazily, so setting this
# after `import torch` still takes effect as long as no CUDA call ran yet).
# NOTE(review): hard-coded device selection — unconditionally overwrites any
# externally exported CUDA_VISIBLE_DEVICES; consider os.environ.setdefault or
# a CLI flag so an outer launcher's setting wins.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"


class Model(nn.Module):
    """CNN + bidirectional-LSTM classifier over mel-spectrogram sequences.

    Two conv(5x5)/maxpool(2x2) stages shrink both the time and mel axes by a
    factor of 4; the remaining time steps are fed to a bidirectional LSTM and
    the last layer's final forward/backward hidden states drive a linear
    classification head.
    """

    def __init__(self, hidden_size, num_classes, n_mels=None, num_layers=None):
        """
        Args:
            hidden_size: LSTM hidden size per direction.
            num_classes: number of output classes.
            n_mels: mel-bin count of the input spectrograms. ``None`` falls
                back to the module-level ``args`` (backward compatible with
                the original, which read the global unconditionally).
            num_layers: number of LSTM layers; same ``args`` fallback.
        """
        super().__init__()
        if n_mels is None:
            n_mels = args.n_mels
        if num_layers is None:
            num_layers = args.num_layers

        # 5x5 kernels with padding=2 preserve spatial size; each 2x2 max-pool
        # halves it, so time and mel axes both end up divided by 4.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2, bias=False)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # After the two pools the per-timestep feature is 64 channels times
        # n_mels // 4 mel bins.
        self.lstm = nn.LSTM(input_size=n_mels // 4 * 64, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x, lengths):
        """Classify a padded batch of spectrograms.

        Args:
            x: [B, T, n_mels] zero-padded mel spectrograms.
            lengths: CPU LongTensor of true (unpadded) frame counts per clip.

        Returns:
            [B, num_classes] unnormalized class logits.
        """
        cnn_in = torch.unsqueeze(x, 1)                       # [B, 1, T, n_mels]
        cnn_out = F.relu(self.maxpool1(self.conv1(cnn_in)))  # [B, 32, T/2, n_mels/2]
        cnn_out = F.relu(self.maxpool2(self.conv2(cnn_out))) # [B, 64, T/4, n_mels/4]
        # [B, T/4, 64, n_mels/4] -> [B, T/4, 64 * n_mels/4]; torch.flatten
        # avoids allocating fresh nn.Flatten modules on every call.
        seq = torch.flatten(cnn_out.transpose(1, 2), start_dim=2)
        # Sequence lengths shrink by the same factor of 4 as the time axis
        # (floor(floor(L/2)/2) == L // 4 for non-negative integers).
        packed_input = nn.utils.rnn.pack_padded_sequence(seq, lengths // 4,
                                                         batch_first=True, enforce_sorted=False)
        _, (last, _) = self.lstm(packed_input)
        # last[-1] / last[-2] are the last layer's backward / forward final
        # states; concatenated in the same order the original produced.
        fc_in = torch.cat([last[-1], last[-2]], dim=1)       # [B, 2 * hidden]
        return self.fc(fc_in)


class Processor():
    """Training/evaluation driver for keyword spotting on SPEECHCOMMANDS.

    Owns the datasets, data loaders, model, optimizer/scheduler and the
    metric histories used by the periodic progress plots.
    """

    def __init__(self, args):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.epochs = args.epochs

        self.train_dataset = SPEECHCOMMANDS("./", download=True, subset="training")
        self.dev_dataset = SPEECHCOMMANDS("./", download=True, subset="validation")
        self.test_dataset = SPEECHCOMMANDS("./", download=True, subset="testing")

        # Sort the label set so the class -> index mapping is deterministic
        # across runs (iteration order of a set of strings depends on the
        # per-process hash seed).
        self.class_dict = {cls: idx for idx, cls in
                           enumerate(sorted(set(i[2] for i in self.test_dataset)))}
        self.num_classes = len(self.class_dict)

        self.transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_mels=args.n_mels)
        self.train_loader = DataLoader(self.train_dataset, batch_size=args.batch_size, shuffle=True,
                                       collate_fn=self.collate_fn, num_workers=4)
        self.dev_loader = DataLoader(self.dev_dataset, batch_size=args.batch_size, shuffle=False,
                                     collate_fn=self.collate_fn, num_workers=4)
        self.test_loader = DataLoader(self.test_dataset, batch_size=args.batch_size, shuffle=False,
                                      collate_fn=self.collate_fn, num_workers=4)

        self.model = Model(args.hidden_size, self.num_classes).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=4, gamma=0.8)
        self.criterion = nn.CrossEntropyLoss()

        # Histories: per-step train loss, per-epoch lr / dev acc / test acc.
        self.loss_list_train = []
        self.lr_list_train = []
        self.acc_list_val = []
        self.acc_list_test = []

    def run(self):
        """Train for ``self.epochs`` epochs; evaluate dev and test after each
        epoch and save progress plots every 10 epochs."""
        for i in range(self.epochs):
            self.train_one_epoch()
            self.validate(self.dev_loader, len(self.dev_dataset), 'dev', i)
            self.validate(self.test_loader, len(self.test_dataset), 'test', i)
            if (i + 1) % 10 == 0:  # dump plots every 10 epochs
                # Training loss curve
                self.my_save_img(self.loss_list_train, 'Step', 'Train_loss',
                                 './resultss/Train_loss' + str(i + 1) + '.png')
                # Learning-rate schedule
                self.my_save_img(self.lr_list_train, 'Step', 'Learning_Rate',
                                 './resultss/Learning_Rate' + str(i + 1) + '.png')
                # Validation accuracy
                self.my_save_img(self.acc_list_val, 'Epoch', 'Val_Acc', './resultss/Val_Acc' + str(i + 1) + '.png')
                # Test accuracy
                self.my_save_img(self.acc_list_test, 'Epoch', 'Test_Acc', './resultss/Test_Acc' + str(i + 1) + '.png')

    def my_save_img(self, mylist, xlabell, ylabell, pictitle):
        """Plot ``mylist`` against its 1-based index and save to ``pictitle``."""
        # Create the output directory on demand — previously plt.savefig
        # crashed when ./resultss/ did not already exist.
        out_dir = os.path.dirname(pictitle)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        plt.plot(range(1, len(mylist) + 1), mylist)
        plt.xlabel(xlabell)
        plt.ylabel(ylabell)
        plt.savefig(pictitle, dpi=300)
        plt.close()

    def train_one_epoch(self):
        """One optimization pass over the training set; records per-step
        losses and steps the LR scheduler once at the end."""
        self.model.train()
        for idx, data in tqdm(enumerate(self.train_loader)):
            audio, audio_length, label = data
            audio = audio.to(self.device)
            # audio_length deliberately stays on the CPU: pack_padded_sequence
            # requires CPU lengths. (The original's `audio_length.to(...)`
            # discarded its result and was a no-op.)
            label = label.to(self.device)

            self.optimizer.zero_grad()
            outputs = self.model(audio, audio_length)
            loss = self.criterion(outputs, label)
            loss.backward()
            self.optimizer.step()
            self.loss_list_train.append(loss.item())

        self.scheduler.step()
        self.lr_list_train.append(self.optimizer.param_groups[0]['lr'])

    def validate(self, dataloader, total_len, mode, epoch_idx):
        """Compute accuracy over ``dataloader`` and record it in the history
        matching ``mode`` ('dev' or 'test', as passed from ``run``)."""
        correct = 0
        with torch.no_grad():
            self.model.eval()
            for idx, data in tqdm(enumerate(dataloader)):
                audio, audio_length, label = data
                audio = audio.to(self.device)
                label = label.to(self.device)
                outputs = self.model(audio, audio_length)
                _, predicted = torch.max(outputs.data, 1)
                correct += (predicted == label).sum().item()
        acc = correct / total_len * 100
        print(f"epoch[{epoch_idx}/{self.epochs}]{mode} acc: {acc}")
        # Fixed: the original compared against 'val', which never matched the
        # 'dev' passed by run(), so dev accuracies landed in acc_list_test.
        if mode == 'dev':
            self.acc_list_val.append(acc)
        else:
            self.acc_list_test.append(acc)

    def collate_fn(self, batch):
        """Collate raw SPEECHCOMMANDS samples into a padded spectrogram batch.

        Returns:
            batched_audio: [B, max_len, n_mels] zero-padded mel spectrograms.
            audio_len: LongTensor of true (unpadded) frame counts.
            labels: LongTensor of class indices per ``self.class_dict``.
        """
        batch = list(zip(*batch))
        # transform yields [1, n_mels, T]; transpose to [1, T, n_mels].
        audio_data = [self.transform(i).transpose(1, 2) for i in batch[0]]
        audio_len = torch.Tensor([i.shape[1] for i in audio_data]).long()
        max_len = audio_len.max()
        # Zero-pad each clip to max_len along time. squeeze(0) instead of
        # squeeze() so legitimate size-1 time/mel dims are never dropped.
        batched_audio = torch.stack(
            [torch.cat([i, torch.zeros(1, int(max_len - i.shape[1]), i.shape[-1])], dim=1).squeeze(0)
             for i in audio_data]
        )  # [B, max_len, n_mels]
        labels = torch.Tensor([self.class_dict[i] for i in batch[2]]).long()

        return batched_audio, audio_len, labels


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Defaults are genuine ints/floats; the original passed strings like
    # "64" and only worked because argparse coerces string defaults via
    # ``type``.
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--hidden_size", type=int, default=2048)
    parser.add_argument("--num_layers", type=int, default=2)
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--n_mels", type=int, default=128)
    # NOTE: ``args`` must stay a module-level global — Model.__init__ reads
    # args.n_mels / args.num_layers when not passed explicitly.
    args = parser.parse_args()
    print(args)
    processor = Processor(args)
    processor.run()