#!/usr/bin/env python
# coding=utf-8

import numpy as np
from torch.utils.data import Dataset
import kaldi_io
from sklearn.preprocessing import LabelEncoder
import torch

class kaldi_Dataset(Dataset):
	"""
	PyTorch dataloader for processing 'uncompressed' Kaldi feats.scp
	"""
	def __init__(self, scp_file, utt2spkid_file, min_length, encode_spk_label=True):
		"""Preprocess Kaldi feats.scp here and balance the training set.

		Args:
			scp_file: path to Kaldi feats.scp ("<utt-id> <rxfile>" per line).
			utt2spkid_file: path to utt2spk mapping ("<utt-id> <speaker>" per line).
			min_length: initial chunk length (frames) drawn per sample.
			encode_spk_label: if True, encode speaker labels to integer ids
				with sklearn's LabelEncoder; otherwise keep them as strings.
		"""
		self.seq_len = min_length
		self.rxfiles, self.labels, self.utt2spkid = [], [], {}

		spkers = []
		# balanced training: count utterances per speaker so rare speakers
		# can be oversampled below
		id_count = {}
		for line in open(utt2spkid_file):
			utt, label = line.rstrip().split()
			# fixed: was `speaker.append(labels)` — both names undefined (NameError);
			# `spkers` is the list fed to label_encoder.fit() below
			spkers.append(label)
			self.utt2spkid[utt] = label
			if label not in id_count:
				id_count[label] = 0
			id_count[label] += 1
		# target count: half of the most frequent speaker's utterance count
		max_id_count = int((max(id_count.values())+1)/2)

		for line in open(scp_file):
			utt, rxfile = line.rstrip().split()
			label = self.utt2spkid[utt]
			# repeat under-represented speakers' utterances to balance classes
			repetition = max(1, max_id_count // id_count[label])
			self.rxfiles.extend([rxfile] * repetition)
			self.labels.extend([label] * repetition)

		self.rxfiles = np.array(self.rxfiles)
		if encode_spk_label:
			self.label_encoder = LabelEncoder()
			self.label_encoder.fit(spkers)
			self.labels = self.label_encoder.transform(self.labels)
		else:
			# fixed: `np.str` (deprecated alias, removed in NumPy 1.24) -> str
			self.labels = np.array(self.labels, dtype=str)

		print("Totally "+str(len(self.rxfiles))+" samples")
		print("Totally "+str(len(np.unique(self.labels)))+" speaker")
		print("at most "+str(max_id_count)+" samples for one speaker")
		print("seq len: "+str(self.seq_len))

	def __len__(self):
		"""Return number of samples (after oversampling)."""
		return len(self.labels)

	def update(self, seq_len):
		"""Update the self.seq_len. We call this in the main training loop
		once per training iteration.
		"""
		self.seq_len = seq_len

	def __getitem__(self, index):
		"""Read the Kaldi feature matrix for `index` and return a random
		contiguous chunk of self.seq_len frames plus its speaker label.
		"""
		rxfile = self.rxfiles[index]
		full_mat = kaldi_io.read_mat(rxfile)
		# utterance must be at least seq_len frames long to cut a chunk
		assert len(full_mat) >= self.seq_len
		pin = np.random.randint(0, len(full_mat) - self.seq_len + 1)
		chunk_mat = full_mat[pin:pin+self.seq_len, :]

		y = np.array(self.labels[index])

		return chunk_mat, y


if __name__ == "__main__":
	# Smoke-test the dataset on two training sets.
	# fixed: was `SequenceDataset`, which is not defined anywhere in this
	# file — the class defined above is `kaldi_Dataset` (NameError when run).
	dataset = kaldi_Dataset(scp_file="train_combined_200k/feats.scp", utt2spkid_file="train_combined_200k/utt2spk", min_length=200, encode_spk_label=False)
	dataset = kaldi_Dataset(scp_file="train/feats.scp", utt2spkid_file="train/utt2spk", min_length=200, encode_spk_label=False)