# -*- coding: utf-8 -*-
from __future__ import print_function

import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim

from ddl_platform.ddlib import job

# for deepspeech
from data.data_loader import SpectrogramDataset, BucketingSampler, DistributedBucketingSampler, _collate_fn #AudioDataLoader
from decoder import GreedyDecoder
from logger import VisdomLogger, TensorBoardLogger
from model import DeepSpeech, supported_rnns

import os
import json
#import models
import torchvision.models as models

class DEEPSPEECHJob(job.Job):
    """Training job for a DeepSpeech2-style speech recognizer.

    Implements the ddl_platform ``job.Job`` hooks: dataset/model/optimizer/
    criterion construction, a training step, and WER-based evaluation.
    Configuration is read from ``self.config()`` under the ``dataset`` and
    ``optimizer`` keys.
    """

    def build_dataset(self):
        """Build the train/val SpectrogramDatasets from manifest CSVs.

        Side effects: stores ``self._audio_conf`` and ``self._labels`` for
        later use by ``build_model``, and ``self._collate_fn`` for the
        platform's data loader.

        Returns:
            (trainset, testset) tuple of SpectrogramDataset instances.
        """
        config = self.config()['dataset']
        data_dir = config['data_dir']

        # Spectrogram extraction parameters shared by datasets and the model.
        self._audio_conf = dict(sample_rate=16000,
                          window_size=0.02,
                          window_stride=0.01,
                          window='hamming',
                          noise_dir=None,
                          noise_prob=0.4,
                          noise_levels=(0.0, 0.5))

        # Label file is a JSON list of characters; join into one label string.
        with open(config['label_file']) as label_file:
            self._labels = str(''.join(json.load(label_file)))

        ## Data loading code
        trainset = SpectrogramDataset(audio_conf=self._audio_conf, manifest_filepath=os.path.join(data_dir, 'train_manifest.csv'), labels=self._labels, normalize=True, speed_volume_perturb=False, spec_augment=False)
        testset = SpectrogramDataset(audio_conf=self._audio_conf, manifest_filepath=os.path.join(data_dir, 'val_manifest.csv'), labels=self._labels, normalize=True, speed_volume_perturb=False, spec_augment=False)

        # Audio batches are variable-length; use the deepspeech collate fn.
        self._collate_fn = _collate_fn

        return trainset, testset

    def build_model(self):
        """Construct the DeepSpeech network and the greedy CTC decoder.

        Requires ``build_dataset`` to have run first (uses ``self._labels``
        and ``self._audio_conf``).

        Returns:
            DeepSpeech model (5 bidirectional GRU layers, hidden size 800).
        """
        model = DeepSpeech(rnn_hidden_size=800,
                           nb_layers=5,
                           labels=self._labels,
                           rnn_type=supported_rnns['gru'],
                           audio_conf=self._audio_conf,
                           bidirectional=True)

        # Greedy (best-path) decoder used for WER evaluation.
        self._decoder = GreedyDecoder(self._labels)
        return model

    def build_optimizer(self):
        """Build an SGD optimizer from the ``optimizer`` config section.

        Returns:
            torch.optim.SGD over the model parameters, with ``lr`` and
            ``momentum`` taken from config and Nesterov disabled.
        """
        model = self.model()
        config = self.config()['optimizer']
        optimizer = optim.SGD(model.parameters(),
                lr=config['lr'],
                momentum=config['momentum'],
                nesterov=False)
        return optimizer

    def build_criterion(self):
        """Return the CTC loss used for training."""
        return nn.CTCLoss()

    def cal_eval_performance(self, batch_outputs, batch_inputs):
        """Compute the summed word-error rate for one evaluation batch.

        Args:
            batch_outputs: (out, output_sizes) as returned by ``training_step``.
            batch_inputs: (inputs, targets, input_sizes, target_sizes); targets
                is a flat 1-D tensor of label indices for the whole batch.

        Returns:
            Sum of per-utterance WER values over the batch (float).
        """
        inputs, targets, input_sizes, target_sizes = batch_inputs
        out, output_sizes = batch_outputs

        # Targets arrive flattened; slice them back into per-utterance pieces.
        split_targets = []
        offset = 0
        for size in target_sizes:
            split_targets.append(targets[offset:offset + size])
            offset += size

        decoded_output, _ = self._decoder.decode(out, output_sizes)
        target_strings = self._decoder.convert_to_strings(split_targets)

        WER = 0.0
        for x in range(len(target_strings)):
            transcript, reference = decoded_output[x][0], target_strings[x][0]
            # BUG FIX: was ``decoder.wer(...)`` — the module name ``decoder``
            # is never imported (only GreedyDecoder is), so this raised
            # NameError. The decoder instance created in build_model exposes
            # the wer() method.
            WER += self._decoder.wer(transcript, reference)
        return WER

    def get_sub_batch(self, batch, idx, sub_bs):
        """Slice sub-batch ``idx`` of size ``sub_bs`` out of a collated batch.

        Args:
            batch: [inputs, targets, input_sizes, target_sizes], where
                targets is flat and target_sizes gives per-sample lengths.
            idx: zero-based sub-batch index.
            sub_bs: sub-batch size in samples.

        Returns:
            List with the same 4-element layout, restricted to the sub-batch.
        """
        sub_batch = []
        sub_batch.append(batch[0][idx*sub_bs:(idx+1)*sub_bs])
        # targets is flattened across the batch, so its slice boundaries are
        # cumulative sums of the preceding per-sample target lengths.
        start_idx = sum(batch[3][:idx*sub_bs])
        end_idx = sum(batch[3][:(idx+1)*sub_bs])
        sub_batch.append(batch[1][start_idx:end_idx])
        sub_batch.append(batch[2][idx*sub_bs:(idx+1)*sub_bs])
        sub_batch.append(batch[3][idx*sub_bs:(idx+1)*sub_bs])

        return sub_batch

    def training_step(self, batch, model):
        """Run one forward pass and compute the CTC loss.

        Args:
            batch: (inputs, targets, input_sizes, target_sizes).
            model: the DeepSpeech model (already on GPU, per the .cuda()
                calls below).

        Returns:
            (loss, (out, output_sizes)) — the outputs tuple feeds
            ``cal_eval_performance``.
        """
        inputs, targets, input_sizes, target_sizes = batch

        inputs = inputs.cuda()
        input_sizes = input_sizes.cuda()

        out, output_sizes = model(inputs, input_sizes)
        # CTCLoss expects (T, N, C); model outputs (N, T, C).
        out = out.transpose(0, 1)

        # Cast to float32 in case the model runs in half precision.
        float_out = out.float()
        loss = self.criterion()(float_out, targets, output_sizes, target_sizes)

        return loss, (out, output_sizes)


#def train():
#    conf_yaml = 'mnist.yaml'
#    job = MnistJob(conf_yaml)
#    t = trainer.Trainer(job)
#    t.fit()
#
#if __name__ == '__main__':
#    train()
