#!/usr/bin/env python
# coding=utf-8

from argparse import ArgumentParser

import torch
import torch.nn as nn
from pytorch_lightning import LightningModule, Trainer
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split

from torchvision import transforms
from torchvision.datasets import MNIST
from pytorch_lightning.callbacks import ModelCheckpoint

from .resnet import resnet18
from .dataset import kaldi_Dataset

class Model(LightningModule):
	"""ResNet18-based classifier trained on Kaldi-format features.

	A ResNet18 trunk (from ``.resnet``) produces a 512-dim embedding
	(printed as "xvector" in ``test_step``), followed by ReLU and a
	linear head over ``num_classes`` classes. Training minimizes NLL
	loss on log-softmax outputs.

	Hyperparameters are captured via ``save_hyperparameters()`` and read
	from ``self.hparams``: num_classes, learning_rate, batch_size,
	num_workers, min_length, and scp/utt2spk paths per split.
	"""

	def __init__(self, model_type="resnet18", **kwargs):
		# NOTE(review): model_type is captured in hparams but not used;
		# only resnet18 is ever instantiated.
		super().__init__()
		self.save_hyperparameters()
		print("ResNet number of classes is: {}".format(self.hparams.num_classes))
		self.model = resnet18()
		# The trunk emits a 512-dim embedding; map it to class logits.
		self.fc = nn.Linear(512, self.hparams.num_classes)

	def forward(self, x):
		"""Return per-class log-probabilities for a batch of inputs."""
		x = self.model(x)
		x = torch.relu(x)
		x = self.fc(x)
		return F.log_softmax(x, dim=1)

	def training_step(self, batch, batch_idx):
		"""Compute and log the NLL training loss for one batch."""
		x, y = batch
		# Add a channel dimension for the conv trunk; features are
		# presumably (batch, time, feat) -- TODO confirm against dataset.
		x = x.unsqueeze(1)
		loss = F.nll_loss(self(x), y)
		self.log('train_loss', loss)
		return loss

	def validation_step(self, batch, batch_idx):
		"""Compute and log the NLL validation loss for one batch."""
		x, y = batch
		x = x.unsqueeze(1)
		loss = F.nll_loss(self(x), y)
		self.log('val_loss', loss)

	def test_step(self, batch, batch_idx):
		"""Print input and trunk-embedding shapes for one test batch.

		Runs only the trunk (no classifier head, no loss); this is a
		shape-debugging / embedding-extraction probe, not a metric.
		"""
		x, y = batch
		print("data: ", x.shape)
		x = x.unsqueeze(1)
		x = self.model(x)
		print("xvector: ", x.shape)

	def configure_optimizers(self):
		"""Adam over all parameters at ``hparams.learning_rate``."""
		return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

	def _make_loader(self, scp_path, utt2spk_path, shuffle=False):
		"""Build a DataLoader over a kaldi_Dataset for one split."""
		dataset = kaldi_Dataset(scp_file=scp_path,
				utt2spkid_file=utt2spk_path,
				min_length=self.hparams.min_length)
		return DataLoader(dataset,
				batch_size=self.hparams.batch_size,
				num_workers=self.hparams.num_workers,
				shuffle=shuffle)

	def train_dataloader(self):
		# FIX: shuffle training data each epoch; the original loader
		# iterated in scp-file order, which hurts SGD convergence.
		return self._make_loader(self.hparams.train_scp_path,
				self.hparams.train_utt2spk, shuffle=True)

	def val_dataloader(self):
		return self._make_loader(self.hparams.val_scp_path,
				self.hparams.val_utt2spk)

	def test_dataloader(self):
		return self._make_loader(self.hparams.test_scp_path,
				self.hparams.test_utt2spk)

	@staticmethod
	def add_model_specific_args(parent_parser):
		"""Attach this model's CLI arguments to ``parent_parser``.

		Returns a new ArgumentParser that inherits the parent's options.
		"""
		parser = ArgumentParser(parents=[parent_parser], add_help=False)
		parser.add_argument('--batch_size', type=int, default=32)
		parser.add_argument('--num_workers', type=int, default=32)
		parser.add_argument('--save_top_k', type=int, default=5)
		# Accept the underscore spelling for consistency with the other
		# flags; the original --min-length keeps working (dest is
		# min_length either way).
		parser.add_argument('--min_length', '--min-length', type=int, default=200)

		parser.add_argument('--train_scp_path', type=str, default='')
		parser.add_argument('--train_utt2spk', type=str, default='')

		parser.add_argument('--val_scp_path', type=str, default='')
		parser.add_argument('--val_utt2spk', type=str, default='')

		parser.add_argument('--test_scp_path', type=str, default='')
		parser.add_argument('--test_utt2spk', type=str, default='')

		parser.add_argument('--learning_rate', type=float, default=0.0001)
		return parser

