"""
The DARPA TIMIT Acoustic-Phonetic Continuous Speech Corpus (TIMIT)
The TIMIT corpus of read speech was designed to provide speech data for the acquisition of acoustic-phonetic knowledge and for the development and evaluation of automatic speech recognition systems.

This homework is a multiclass classification task: we train a deep neural network classifier to predict the phoneme for each frame of the TIMIT speech corpus.
"""
import torch
from torch import nn
import gc
from Classifier import Classifier
from TimitDataset import TIMITDataset
import numpy as np
from torch.utils.data import DataLoader


def get_device():
    """Pick the compute device string: 'cuda' when a GPU is available, else 'cpu'."""
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'


BATCH_SIZE = 2048  # mini-batch size used by both the training and validation loaders
VAL_RATIO = 0.2  # fraction of the labeled data held out as the validation set
num_epoch = 100  # number of training epoch
learning_rate = 0.0001  # learning rate for the Adam optimizer

load_model = False  # when True, train() resumes from the checkpoint at model_path

# the path where checkpoint saved
model_path = './model.ckpt'
device = get_device()
print(f'DEVICE: {device}')

# directory containing train_11.npy / train_label_11.npy / test_11.npy
data_root = './timit_11/'


def same_seeds(seed):
    """Seed every RNG in use (NumPy, torch CPU, torch CUDA) for reproducibility.

    Also disables cuDNN autotuning and forces deterministic cuDNN kernels so
    repeated runs with the same seed produce the same results.

    Args:
        seed: integer seed applied to all random number generators.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


"""
百万条数据，分39类，0.7正确率
baseline验证集0.7
使用更复杂的网络效果一般，验证集0.7
换了激活函数Relu，到0.71多，Sigmoid还是差一些
batchsize改为512, 0.717；1024  0.714
ratio 0.05  0.712
换seed效果不大
每层上dropout 0.5，直接到0.75正确率，但是训练过程比较慢
上了norm，训练快了很多 nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
dropout 变为0.3，过拟合增加，训练更快
"""
def train():
    """Train the frame-level phoneme classifier on TIMIT.

    Loads the labeled data from `data_root`, splits off the last VAL_RATIO
    fraction as a validation set, trains for `num_epoch` epochs with Adam and
    cross-entropy loss, and saves the checkpoint with the best validation
    accuracy to `model_path`. If the validation set is empty, the weights from
    the final epoch are saved instead.
    """
    print('Loading data ...')
    train_data = np.load(data_root + 'train_11.npy')
    train_label = np.load(data_root + 'train_label_11.npy')
    print('Size of training data: {}'.format(train_data.shape))
    """
    Split the labeled data into a training set and a validation set, you can modify the variable VAL_RATIO to change the ratio of validation data.
    """
    percent = int(train_data.shape[0] * (1 - VAL_RATIO))
    train_x, train_y, val_x, val_y = train_data[:percent], train_label[:percent], train_data[percent:], train_label[percent:]
    print('Size of training set: {}'.format(train_x.shape))
    print('Size of validation set: {}'.format(val_x.shape))

    # Create a data loader from the dataset
    train_set = TIMITDataset(train_x, train_y)
    val_set = TIMITDataset(val_x, val_y)
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)  # only shuffle the training data
    val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)

    # Cleanup the unneeded variables to save memory.
    del train_data, train_label, train_x, train_y, val_x, val_y
    gc.collect()
    # Fix random seeds before the model is built and the loaders are iterated,
    # so weight initialization and shuffle order are reproducible.
    same_seeds(110)

    # create model, define a loss function, and optimizer
    model = Classifier().to(device)
    if load_model:
        print('load model')
        # map_location keeps a GPU-saved checkpoint loadable on a CPU-only host
        model.load_state_dict(torch.load(model_path, map_location=device))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # start training
    best_acc = 0.0
    for epoch in range(num_epoch):
        train_acc = 0.0
        train_loss = 0.0
        val_acc = 0.0
        val_loss = 0.0

        # training
        model.train()  # set the model to training mode
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            batch_loss = criterion(outputs, labels)
            _, train_pred = torch.max(outputs, 1)  # get the index of the class with the highest probability
            batch_loss.backward()
            # clip gradients to stabilize training (see experiment log above)
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
            optimizer.step()

            train_acc += (train_pred.cpu() == labels.cpu()).sum().item()
            train_loss += batch_loss.item()

        # validation
        if len(val_set) > 0:
            model.eval()  # set the model to evaluation mode
            with torch.no_grad():
                for i, data in enumerate(val_loader):
                    inputs, labels = data
                    inputs, labels = inputs.to(device), labels.to(device)
                    outputs = model(inputs)
                    batch_loss = criterion(outputs, labels)
                    _, val_pred = torch.max(outputs, 1)

                    val_acc += (val_pred.cpu() == labels.cpu()).sum().item()  # get the index of the class with the highest probability
                    val_loss += batch_loss.item()

                print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(
                    epoch + 1, num_epoch, train_acc / len(train_set), train_loss / len(train_loader), val_acc / len(val_set),
                    val_loss / len(val_loader)
                ))

                # if the model improves, save a checkpoint at this epoch
                if val_acc > best_acc:
                    best_acc = val_acc
                    torch.save(model.state_dict(), model_path)
                    print('saving model with acc {:.3f}'.format(best_acc / len(val_set)))
        else:
            print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(
                epoch + 1, num_epoch, train_acc / len(train_set), train_loss / len(train_loader)
            ))

    # if not validating, save the last epoch
    if len(val_set) == 0:
        torch.save(model.state_dict(), model_path)
        print('saving model at last epoch')


def test():
    """Run inference on the TIMIT test split and write predictions to CSV.

    Loads the unlabeled test data from `data_root`, restores the model weights
    from `model_path`, predicts one phoneme class per frame, and writes the
    results to 'prediction.csv' with header 'Id,Class'.
    """
    test_data = np.load(data_root + 'test_11.npy')
    print('Size of testing data: {}'.format(test_data.shape))
    # create testing dataset (no labels)
    test_set = TIMITDataset(test_data, None)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)

    # create model and load weights from checkpoint;
    # map_location keeps a GPU-saved checkpoint loadable on a CPU-only host
    model = Classifier().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))

    # Make prediction.
    predict = []
    model.eval()  # set the model to evaluation mode
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            inputs = data
            inputs = inputs.to(device)
            outputs = model(inputs)
            _, test_pred = torch.max(outputs, 1)  # get the index of the class with the highest probability
            # extend with the whole batch at once instead of appending per item
            predict.extend(test_pred.cpu().numpy())

    # Write prediction to a CSV file.
    with open('prediction.csv', 'w') as f:
        f.write('Id,Class\n')
        for i, y in enumerate(predict):
            f.write('{},{}\n'.format(i, y))


# Script entry point: runs training only; call test() separately to produce prediction.csv.
if __name__ == '__main__':
    train()