import pandas as pd
import numpy as np
import torch
import random
import os
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from tqdm import tqdm, trange
from sklearn.metrics import f1_score, accuracy_score

# shortcut-branch output vs. main-branch output

import json
import librosa
import librosa.display
import numpy
import logging
import pickle as cPickle
import copy
from sklearn import mixture


logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)

logger = logging.getLogger(__name__)


class Inception_base(nn.Module):
    """GoogLeNet-style inception block with four parallel branches.

    `config` is a nested list of channel counts:
        config[0][0]  -> output channels of the 1x1 branch
        config[1]     -> (bottleneck, output) channels of the 3x3 branch
        config[2]     -> (bottleneck, output) channels of the 5x5 branch
        config[3]     -> channel chain of the stacked-3x3 branch
                         (1x1 bottleneck followed by three 3x3 convs)
    All branch outputs are concatenated along `depth_dim`.
    """

    def __init__(self, depth_dim, input_size, config):
        super(Inception_base, self).__init__()

        self.depth_dim = depth_dim

        # 1x1 branch
        self.conv1 = nn.Conv2d(input_size, out_channels=config[0][0], kernel_size=1, stride=1, padding=0)

        # 3x3 branch: 1x1 bottleneck then a 3x3 conv
        self.conv3_1 = nn.Conv2d(input_size, out_channels=config[1][0], kernel_size=1, stride=1, padding=0)
        self.conv3_3 = nn.Conv2d(config[1][0], config[1][1], kernel_size=3, stride=1, padding=1)

        # 5x5 branch: 1x1 bottleneck then a 5x5 conv
        self.conv5_1 = nn.Conv2d(input_size, out_channels=config[2][0], kernel_size=1, stride=1, padding=0)
        self.conv5_5 = nn.Conv2d(config[2][0], config[2][1], kernel_size=5, stride=1, padding=2)

        # Fourth branch: 1x1 bottleneck followed by three stacked 3x3 convs
        # (stacked 3x3s used in place of a single large 7x7 kernel).
        self.conv7_1 = nn.Conv2d(input_size, out_channels=config[3][0], kernel_size=1, stride=1, padding=0)
        self.conv7_2 = nn.Conv2d(config[3][0], config[3][1], kernel_size=3, stride=1, padding=1)
        self.conv7_3 = nn.Conv2d(config[3][1], config[3][2], kernel_size=3, stride=1, padding=1)
        self.conv7_4 = nn.Conv2d(config[3][2], config[3][3], kernel_size=3, stride=1, padding=1)

        # Pooling-branch modules are constructed but NOT used by forward()
        # (that branch is disabled below); kept so state dicts stay compatible.
        self.max_pool_1 = nn.MaxPool2d(kernel_size=config[3][0], stride=1, padding=1)
        self.conv_max_1 = nn.Conv2d(input_size, out_channels=config[3][1], kernel_size=1, stride=1, padding=0)

    def forward(self, input):
        """Run the four branches and concatenate their ReLU outputs along depth_dim."""
        branch_1x1 = F.relu(self.conv1(input))

        branch_3x3 = F.relu(self.conv3_3(F.relu(self.conv3_1(input))))

        branch_5x5 = F.relu(self.conv5_5(F.relu(self.conv5_1(input))))

        branch_stack = F.relu(self.conv7_1(input))
        for conv in (self.conv7_2, self.conv7_3, self.conv7_4):
            branch_stack = F.relu(conv(branch_stack))
        # The max_pool_1 / conv_max_1 branch is intentionally disabled.

        return torch.cat([branch_1x1, branch_3x3, branch_5x5, branch_stack], dim=self.depth_dim)


def conv3x3(in_planes,out_planes,stride =1,groups =1,dilation=1):
    """3x3 convolution with shape-preserving padding (padding = dilation), no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )

def conv1x1(in_planes,out_planes,stride=1):
    """1x1 (pointwise) convolution, no bias."""
    return nn.Conv2d(in_planes, out_planes, stride=stride, kernel_size=1, bias=False)


class BasicBlock(nn.Module):
    """Two 3x3 convs with BatchNorm and a residual (identity) shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Main path: conv-bn-relu-conv-bn, then add the shortcut.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # When the shortcut's shape differs from the main path (stride or
        # channel change), `downsample` projects it (typically a 1x1 conv + BN).
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # H(x) = F(x) + x (or + Wx when the shortcut is projected)
        out += shortcut
        return self.relu(out)


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (output width = planes * 4)."""

    expansion =4

    def __init__(self,inplanes,planes,stride =1,downsample = None):
        super(Bottleneck,self).__init__()
        width_out = planes * self.expansion
        # Reduce channels, convolve at 3x3 (optionally strided), expand back.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)


class ResNet(nn.Module):
    """ResNet trunk followed by a single Inception block and a linear classifier.

    NOTE(review): several sizes are hard-coded to one specific configuration —
    fc expects 2280 flattened features and the Inception block expects 512
    input channels (consistent with BasicBlock's expansion of 1). Other block
    types or input lengths will fail at runtime — confirm against main().
    """

    def __init__(self,block,layers,num_classes):
        # Channel count entering layer1; _make_layer updates it as it builds.
        self.inplanes =64
        super(ResNet, self).__init__()
        # Stem: input is reshaped to a 1-channel "image" in forward().
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)

        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # 1x1 average pooling with stride 1 leaves the tensor unchanged.
        self.avgpool = nn.AvgPool2d(1, stride=1)
        self.fc = nn.Linear(2280, num_classes)  # presumably the flattened inception output size for the expected input — TODO confirm
        self.inception_3a = Inception_base(1, 512, [[480], [480, 300], [100, 200], [30, 60,120,160]])  # 3a

        # He initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks with width `planes`.

        Note: the first block is appended unconditionally, so blocks=0 still
        produces a stage containing one block.
        """
        downsample = None
        # When the shortcut's shape differs from the block output (stride or
        # channel change), project it with a strided 1x1 conv + BN so the
        # residual addition is well-defined.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        # Only the first block of a stage may need the projection shortcut.
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x,label = None):
        """Return cross-entropy loss when `label` is given, otherwise logits.

        Args:
            x: batch of flat feature vectors; reshaped to (batch, 1, -1, 40),
               i.e. a feature width of 40 is assumed — TODO confirm.
            label: optional LongTensor of class indices.
        """
        batch_size = x.shape[0]
        x =x.view(batch_size,1,-1,40)     # treat features as a 1-channel image of width 40

        x = self.conv1(x)

        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)


        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)


        x = self.inception_3a(x)

        x = x.view(x.size(0), -1)

        reshaped_logits = self.fc(x)
        if label is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, label)
            return loss
        else:
            return reshaped_logits









# class ConvNet(nn.Module):
#     def __init__(self, num_classes):
#         super(ConvNet, self).__init__()
#         self.layer1 = nn.Sequential(
#             nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
#             nn.BatchNorm2d(16),
#             nn.ReLU(),
#             nn.MaxPool2d(kernel_size=2, stride=2))
#         self.layer2 = nn.Sequential(
#             nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
#             nn.BatchNorm2d(32),
#             nn.ReLU(),
#             nn.MaxPool2d(kernel_size=2, stride=2))
#         self.fc = nn.Linear(34880, num_classes)
#
#     def forward(self, x,label = None):
#         batch_size = x.shape[0]
#         x =x.view(batch_size,1,40,-1)
#
#         out = self.layer1(x)
#
#         out = self.layer2(out)
#         out = out.reshape(out.size(0), -1)
#
#         reshaped_logits = self.fc(out)
#
#         if label is not None:
#             loss_fct = CrossEntropyLoss()
#             loss = loss_fct(reshaped_logits, label)
#             return loss
#         else:
#             return reshaped_logits





def accuracy(out, labels):
    """Fraction of rows of `out` whose argmax matches `labels`.

    Args:
        out: (n_samples, n_classes) array of logits/scores.
        labels: array-like of n_samples integer class labels.

    Returns:
        float accuracy in [0, 1].
    """
    # Plain numpy mean of argmax matches; equivalent to
    # sklearn.metrics.accuracy_score for integer label arrays, without the
    # third-party dependency.
    predictions = np.argmax(out, axis=1)
    return float(np.mean(np.asarray(labels) == predictions))



class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, id,label=None,label_name = None):
        """Constructs an InputExample.

        Args:
            id: wav file name of the example (relative to the train/ or
                test/ directory; e.g. "123.wav").
            label: (Optional) integer class label. Provided for train/dev
                examples but not for test examples.
            label_name: (Optional) human-readable name of the label.
        """
        self.id = id
        self.label = label
        self.label_name = label_name

class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 id,
                 input_mfcc,
                 label):
        self.id = id                 # numeric id of the audio clip
        self.input_mfcc = input_mfcc # per-clip feature (GMM supervector built from MFCC frames)
        self.label = label           # class label used for training


def set_seed(seed,n_gpu):
    """Seed the python, numpy and torch RNGs (and CUDA when GPUs are present)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)


def convert_examples_to_features(examples,gmm,is_training=True):
    """Turn each wav example into an InputFeatures holding its GMM supervector.

    For every example: load the wav from train/ or test/, keep the first
    14 seconds, extract 40-dim MFCCs, mean-normalize them, compute the
    per-frame posteriors under `gmm`, and MAP-adapt the GMM into a
    supervector feature.
    """
    features = []
    for example in examples:
        prefix = "train/" if is_training else "test/"
        wave_name = prefix + example.id

        audio, sample_rate = librosa.load(wave_name, sr=16000)
        # Keep only the first 14 seconds of audio.
        audio = audio[0:int(14 * sample_rate)]

        # MFCC extraction followed by mean normalization.
        mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40)
        mfccs -= (np.mean(mfccs, axis=0) + 1e-8)
        mfccs = mfccs.T

        posteriors = gmm.predict_proba(mfccs)
        enc = supervector(gmm, mfccs, posteriors)

        features.append(
            InputFeatures(
                id=int(example.id[:-4]),  # strip the ".wav" suffix
                input_mfcc=enc,
                label=example.label,
            )
        )
    return features



def read_examples(input_file):
    """Read (id, label, label_name) rows from a CSV into InputExample objects."""
    frame = pd.read_csv(input_file)
    return [
        InputExample(id=row[0], label=row[1], label_name=row[2])
        for row in frame[['id', 'label', 'label_name']].values
    ]

def general_gmm_params(directory):
    """Fit a diagonal-covariance GMM (universal background model) on the
    MFCC frames of every .wav file under `directory`.

    Each file contributes the mean-normalized 40-dim MFCC frames of its
    first 14 seconds; all frames are stacked and a 10-component GMM is fit.

    Args:
        directory: path containing .wav training files.

    Returns:
        A fitted sklearn GaussianMixture.
    """
    all_mfccs = []
    for file in os.listdir(directory):
        if not file.endswith(".wav"):
            continue

        wave_name = os.path.join(directory, file)
        audio, sample_rate = librosa.load(wave_name, sr=16000)
        audio = audio[0:int(14 * sample_rate)]  # keep only the first 14 seconds

        mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40)
        mfccs -= (np.mean(mfccs, axis=0) + 1e-8)
        all_mfccs.append(mfccs.T)  # (frames, 40)

    # Stack once at the end: repeatedly np.vstack-ing inside the loop is
    # quadratic in the number of files.
    features = np.vstack(all_mfccs)

    gmm = mixture.GaussianMixture(n_components=10, covariance_type='diag')
    gmm.fit(features)
    return gmm

def adaptMAP(data,gmm,posteriors, relevance=16, update='wmc'):
    """MAP-adapt a background GMM (UBM) to one document's feature matrix.

    Classic relevance-MAP adaptation: per-component zeroth/first/second
    order statistics are collected via `posteriors`, then interpolated with
    the UBM parameters using the relevance factor.

    Args:
        data: (n_frames, n_features) feature matrix of one document.
        gmm: fitted sklearn GaussianMixture acting as the UBM.
        posteriors: (n_frames, n_components) responsibilities for `data`,
            e.g. gmm.predict_proba(data).
        relevance: relevance factor; larger keeps the result closer to the UBM.
        update: subset of 'wmc' selecting which parameters to adapt
            (weights / means / covariances).

    Returns:
        A new GaussianMixture whose weights_/means_/covariances_ hold the
        adapted parameters. NOTE(review): covariances are assigned directly,
        bypassing sklearn's validity checks (see commented call below).
    """
    sum_post = np.sum(posteriors, axis=0)  # (N_component x ,)
    nd = len(gmm.weights_)    # number of components
    fd = data.shape[1]  # feature dimension

    data_square = data * data

    def loop(i):
        # first- and (raw) second-order statistics for component i
        means_ = posteriors[:, i].reshape(1, -1).dot(data)
        covs_ = posteriors[:, i].reshape(1, -1).dot(data_square)
        return means_, covs_

    means, covs = zip(*map(loop, range(nd)))

    # nd: number of Gaussian components
    # fd: number of feature dimensions
    means = np.array(means).reshape(nd, fd)
    covs = np.array(covs).reshape(nd, fd)

    # add some small number
    means += np.finfo(float).eps
    covs += np.finfo(float).eps

    # normalize them
    means /= sum_post.reshape(-1, 1) + np.finfo(float).eps
    covs /= sum_post.reshape(-1, 1) + np.finfo(float).eps

    # now combine the two estimates using the relevance factor
    # i.e. interpolation controlled by relevance factor
    def combine(i):
        alpha = sum_post[i] / (sum_post[i] + relevance)

        # update priors
        if 'w' in update:
            weights_ = ((alpha * sum_post[i]) / float(len(data))) \
                       + ((1.0 - alpha) * gmm.weights_[i])
        else:
            weights_ = copy.deepcopy(gmm.weights_[i])

        # update means
        if 'm' in update:
            means_ = alpha * means[i] \
                     + ((1.0 - alpha) * gmm.means_[i])
        else:
            means_ = copy.deepcopy(gmm.means_[i])
        # update covariance matrix
        if 'c' in update:
            covs_ = alpha * covs[i] \
                    + (1.0 - alpha) * (gmm.covariances_[i] + \
                                       gmm.means_[i] * gmm.means_[i]) \
                    - (means_ * means_)  # careful, this is means_ not means_[i],
            # since we are in that specific
            # component computation already!
        else:
            covs_ = copy.deepcopy(gmm.covariances_[i])

        return weights_, means_, covs_

    weights, means, covs = zip(*map(combine, range(nd)))

    weights = np.array(weights)
    means = np.array(means)
    covs = np.array(covs)

    # let weights sum to 1
    if 'w' in update:
        weights /= weights.sum() + np.finfo(float).eps

    # create new mixture
    adapted_gmm = mixture.GaussianMixture(nd)
    # and assign mean, cov, priors to it
    adapted_gmm.weights_ = weights
    adapted_gmm.means_ = means
    adapted_gmm.covariances_ = covs
    # adapted_gmm._set_covariances( covs ) # this variant checks the covariances
    return adapted_gmm


def supervectorStacking(gmm, update='wmc'):
    """Stack selected GMM parameters into a supervector matrix.

    Each selected parameter block (means for 'm', covariances for 'c') is
    flattened to a single row; rows are concatenated along axis 0.
    """
    rows = []
    if 'm' in update:
        rows.append(gmm.means_.reshape(1, -1))
    if 'c' in update:
        rows.append(gmm.covariances_.reshape(1, -1))
    return np.concatenate(rows, axis=0)


def supervector(gmm, features, posteriors, relevance=16, update='wmc'):
    """Return the stacked supervector for one document.

    MAP-adapts the background `gmm` to the document's `features` (using the
    precomputed `posteriors`) and stacks the adapted parameters.
    """
    adapted_gmm = adaptMAP(features, gmm, posteriors, relevance, update)
    return supervectorStacking(adapted_gmm, update)


def main():
    """Train the ResNet audio classifier on GMM-supervector features,
    evaluate periodically during training, and optionally predict on the
    test set.

    Side effects: writes "eval_file.pkl"/"eval_features.pkl" caches in the
    working directory and checkpoints/logs under `output_dir`.
    """
    # --- configuration (edit here; no CLI parsing) ---
    do_train = 1
    do_test = 0
    do_eval = 1

    per_gpu_train_batch_size = 8
    per_gpu_eval_batch_size = 32
    num_train_epochs = 160
    gradient_accumulation_steps = 1
    no_cuda = 0
    seed = 42
    local_rank = -1
    data_dir = "data_0"
    output_dir = "output"
    lr = 0.0001

    # --- device / (optional) distributed setup ---
    if local_rank == -1 or no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:  # initialize the distributed backend that synchronizes nodes/GPUs
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        torch.distributed.init_process_group(backend='nccl')
        n_gpu = 1

    set_seed(seed, n_gpu)

    # Checkpoints and eval logs go under output_dir; create it up front so
    # the first write cannot fail.
    os.makedirs(output_dir, exist_ok=True)

    model = ResNet(BasicBlock, [2, 2, 1, 0], num_classes=21)
    model.to(device)

    # Universal background model fitted on all training wavs; used to build
    # per-file supervector features.
    gmm = general_gmm_params('train')

    if do_train:
        train_examples = read_examples(os.path.join(data_dir, 'train.csv'))
        train_features = convert_examples_to_features(train_examples, gmm, True)

        num_train_steps = int(
            len(train_examples) / per_gpu_train_batch_size / gradient_accumulation_steps * num_train_epochs)
        logger.info("***** Running training *****")
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Batch size = %d", per_gpu_train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        all_input_mfcc = torch.tensor([f.input_mfcc for f in train_features], dtype=torch.float)
        all_labels = torch.tensor([f.label for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_mfcc, all_labels)
        if local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=per_gpu_train_batch_size)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        model.train()
        best_acc = 0
        for _ in trange(int(num_train_epochs), desc="Epoch"):
            model.zero_grad()
            epoch_iterator = tqdm(train_dataloader, disable=None)
            for step, batch in enumerate(epoch_iterator):
                # Always move the batch to the model's device: a no-op on
                # CPU and required on GPU (previously skipped when n_gpu != 1).
                batch = tuple(t.to(device) for t in batch)
                input_mfcc, label = batch
                loss = model(input_mfcc, label)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                if (step + 1) % 10 == 0:
                    logger.info("loss@{}:{}".format(step, loss.cpu().item()))

                if do_eval and (step + 1) % 10 == 0:
                    for file in ['test_label.csv']:
                        gold_labels = []
                        inference_logits = []

                        # Examples/features are expensive to build (librosa +
                        # GMM adaptation), so cache them with pickle.
                        if os.path.exists("eval_file.pkl"):
                            eval_examples = cPickle.load(open("eval_file.pkl", mode='rb'))
                        else:
                            eval_examples = read_examples(os.path.join(data_dir, file))
                            cPickle.dump(eval_examples, open("eval_file.pkl", mode='wb'))

                        if os.path.exists("eval_features.pkl"):
                            eval_features = cPickle.load(open("eval_features.pkl", mode='rb'))
                        else:
                            eval_features = convert_examples_to_features(eval_examples, gmm, False)
                            cPickle.dump(eval_features, open("eval_features.pkl", mode='wb'))

                        all_input_mfcc = torch.tensor([f.input_mfcc for f in eval_features], dtype=torch.float)
                        all_eval_labels = torch.tensor([f.label for f in eval_features], dtype=torch.long)
                        eval_data = TensorDataset(all_input_mfcc, all_eval_labels)

                        eval_sampler = SequentialSampler(eval_data)
                        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
                                                     batch_size=per_gpu_eval_batch_size)

                        model.eval()
                        eval_loss = 0
                        nb_eval_steps = 0
                        for input_mfcc, label in eval_dataloader:
                            input_mfcc = input_mfcc.to(device)
                            label = label.to(device)

                            with torch.no_grad():
                                tmp_eval_loss = model(input_mfcc, label)
                                logits = model(input_mfcc)

                            gold_labels.append(label.to('cpu').numpy())
                            inference_logits.append(logits.detach().cpu().numpy())
                            eval_loss += tmp_eval_loss.mean().item()
                            nb_eval_steps += 1

                        gold_labels = np.concatenate(gold_labels, 0)
                        inference_logits = np.concatenate(inference_logits, 0)
                        model.train()
                        eval_loss = eval_loss / nb_eval_steps
                        # NOTE(review): despite the 'eval_F1' key below, this
                        # metric is plain accuracy (see accuracy()).
                        eval_accuracy = accuracy(inference_logits, gold_labels)

                        result = {'eval_loss': eval_loss,
                                  'eval_F1': eval_accuracy,
                                  }

                        output_eval_file = os.path.join(output_dir, "eval_results.txt")
                        with open(output_eval_file, "a") as writer:
                            for key in sorted(result.keys()):
                                logger.info("  %s = %s", key, str(result[key]))
                                writer.write("%s = %s\n" % (key, str(result[key])))
                            writer.write('*' * 80)
                            writer.write('\n')

                        if eval_accuracy > best_acc and 'test_label' in file:
                            print("=" * 80)
                            print("Best F1", eval_accuracy)
                            print("Saving Model......")
                            best_acc = eval_accuracy
                            # Unwrap DataParallel/DDP wrappers, if any, so the
                            # saved state dict has plain parameter names.
                            model_to_save = model.module if hasattr(model, 'module') else model
                            output_model_file = os.path.join(output_dir, "pytorch_model.bin")
                            torch.save(model_to_save.state_dict(), output_model_file)
                            print("=" * 80)
                        else:
                            print("=" * 80)

    # Reload the best checkpoint saved during training.
    output_model_file = os.path.join(output_dir, "pytorch_model.bin")
    model_state_dict = torch.load(output_model_file, map_location=lambda storage, loc: storage)
    # BUGFIX: rebuild the SAME architecture that was trained and saved above;
    # the previous ResNet(Bottleneck, [2, 2, 2, 2]) could not load this
    # state dict (different layer shapes and names).
    model = ResNet(BasicBlock, [2, 2, 1, 0], num_classes=21)
    model.load_state_dict(model_state_dict)
    model.to(device)

    if do_test:
        test_examples = read_examples(os.path.join(data_dir, 'test.csv'))
        # BUGFIX: the gmm argument was previously missing here, which passed
        # False as the GMM and crashed inside feature extraction.
        test_features = convert_examples_to_features(test_examples, gmm, False)

        logger.info("***** Running predictions *****")
        logger.info("  Num orig examples = %d", len(test_examples))
        logger.info("  Num split examples = %d", len(test_features))
        logger.info("  Batch size = %d", per_gpu_eval_batch_size)

        all_input_mfcc = torch.tensor([f.input_mfcc for f in test_features], dtype=torch.float)
        all_id = torch.tensor([f.id for f in test_features], dtype=torch.int)
        test_data = TensorDataset(all_input_mfcc, all_id)

        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=per_gpu_eval_batch_size)

        model.eval()
        all_results = {}
        logger.info("Start evaluating")

        for input_mfcc, ids in tqdm(test_dataloader, desc="Evaluating", disable=None):
            if len(all_results) % 100 == 0:
                logger.info("Processing example: %d" % (len(all_results)))

            input_mfcc = input_mfcc.to(device)
            with torch.no_grad():
                batch_logits = model(input_mfcc)
            for i, tag in enumerate(ids):
                logits = batch_logits[i].detach().cpu().numpy()
                all_results[int(tag)] = int(np.argmax(logits))

        print(len(all_results))
        # Write "id,predicted_label" rows.
        output_prediction_file = os.path.join(output_dir, "result_sam.csv")
        with open(output_prediction_file, "w") as f:
            for each in all_results:
                f.write(str(each) + ',' + str(all_results[each]) + "\n")


if __name__ == '__main__':
    main()
