#!/usr/bin/env python
# encoding: utf-8

import os
from argparse import ArgumentParser
import numpy as np

import torch
import torch.nn as nn
from pytorch_lightning import LightningModule, Trainer
from torch.nn import functional as F
from torch.utils.data import DataLoader

from pytorch_lightning.callbacks import ModelCheckpoint
import torchaudio
from tqdm import tqdm

import importlib
from collections import OrderedDict
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.decomposition import PCA

from .dataset_loader import Train_Dataset, Train_Sampler, Test_Dataset
from .backend import compute_eer, cosine_score

class Model(LightningModule):
    """Speaker-verification model.

    Wraps a configurable speaker encoder (resolved by name from
    ``openasv.nnet``) trained with a pluggable classification loss
    (resolved from ``openasv.loss``), plus cosine / PCA / LDA back-end
    scoring against a trial list.

    All hyper-parameters arrive through ``**kwargs`` and are persisted
    via ``save_hyperparameters`` so they are reachable as ``self.hparams``.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.save_hyperparameters()

        # Trial list: rows of (target-label, enroll-utt, test-utt) strings.
        # NOTE: the np.str alias was removed in NumPy 1.24 -- use the
        # builtin str instead.
        self.trials = np.loadtxt(self.hparams.trials_path, dtype=str)
        if os.path.exists(self.hparams.train_list_path):
            _, speaker = torch.load(self.hparams.train_list_path)
            self.hparams.num_classes = len(speaker)
            print("Number of Training Speaker classes is: {}".format(self.hparams.num_classes))

        # Network structure: resolve the encoder class dynamically so new
        # architectures can be added without touching this file.
        Speaker_Encoder = importlib.import_module('openasv.nnet.'+self.hparams.nnet_type).__getattribute__('Speaker_Encoder')
        self.speaker_encoder = Speaker_Encoder(n_out=self.hparams.embedding_dim, n_mel=self.hparams.n_mel)

        # The classification loss is only needed for training.
        if not self.hparams.test:
            LossFunction = importlib.import_module('openasv.loss.'+self.hparams.loss_type).__getattribute__('LossFunction')
            self.loss = LossFunction(self.hparams.embedding_dim, self.hparams.num_classes)

    def forward(self, x, label):
        """Return (mean loss, accuracy) for a labelled training batch."""
        x = self.extract_speaker_embedding(x)
        loss, acc = self.loss(x, label)
        return loss.mean(), acc

    def extract_speaker_embedding(self, data):
        """Run the encoder on *data*.

        Any leading dimensions of *data* are folded into the batch axis;
        the last dimension is treated as the waveform samples.
        """
        x = data.reshape(-1, data.size()[-1])
        return self.speaker_encoder(x)

    def training_step(self, batch, batch_idx):
        """Single optimisation step; loss/accuracy go to the progress bar."""
        data, label = batch
        loss, acc = self(data, label)
        tqdm_dict = {'train_loss': loss, "train_acc": acc}
        return OrderedDict({
            'loss': loss,
            'progress_bar': tqdm_dict,
            'log': tqdm_dict,
        })

    def train_dataloader(self, aug=False):
        """Build the training loader.

        A fresh random crop length in [min_frames, max_frames) is drawn
        every time the loader is (re)built, giving per-epoch variation.
        """
        frames_len = np.random.randint(self.hparams.min_frames, self.hparams.max_frames)
        print("Training Frame is: ", frames_len)
        train_dataset = Train_Dataset(self.hparams.train_list_path, aug, musan_path=None, rirs_path=None,
                max_frames=frames_len)
        train_sampler = Train_Sampler(train_dataset, self.hparams.nPerSpeaker,
                self.hparams.max_seg_per_spk, self.hparams.batch_size)
        return DataLoader(
                train_dataset,
                batch_size=self.hparams.batch_size,
                num_workers=self.hparams.num_workers,
                sampler=train_sampler,
                pin_memory=True,
                drop_last=False,
                )

    def test_dataloader(self):
        """Build a loader over the union of enroll and test utterances."""
        enroll_list = np.unique(self.trials.T[1])
        test_list = np.unique(self.trials.T[2])
        eval_list = np.unique(np.append(enroll_list, test_list))
        print("number of eval: ", len(eval_list))
        print("number of enroll: ", len(enroll_list))
        print("number of test: ", len(test_list))

        test_dataset = Test_Dataset(data_list=eval_list, eval_frames=self.hparams.min_frames, num_eval=10)
        return DataLoader(test_dataset, num_workers=self.hparams.num_workers, batch_size=1)

    def _extract_eval_vectors(self):
        """Extract one mean embedding per eval utterance (inference mode).

        Returns:
            (index_mapping, eval_vectors): dict mapping utterance id to a
            row index, and the stacked embedding matrix as a numpy array.
        """
        eval_loader = self.test_dataloader()
        index_mapping = {}
        eval_vectors = [None] * len(eval_loader)
        print("extract eval speaker embedding...")
        self.speaker_encoder.eval()
        with torch.no_grad():
            for idx, (data, label) in enumerate(tqdm(eval_loader)):
                # batch_size=1: move the num_eval crops to the batch axis.
                data = data.permute(1, 0, 2).cuda()
                label = list(label)[0]
                index_mapping[label] = idx
                embedding = self.extract_speaker_embedding(data)
                # Average the crops into a single utterance-level vector.
                embedding = torch.mean(embedding, dim=0)
                eval_vectors[idx] = embedding.cpu().detach().numpy()
        return index_mapping, np.array(eval_vectors)

    def cosine_evaluate(self):
        """Score the trial list with plain cosine similarity; log the EER."""
        index_mapping, eval_vectors = self._extract_eval_vectors()
        print("scoring...")
        eer, th = cosine_score(self.trials, index_mapping, eval_vectors)
        print("Cosine EER: {:.2f}%".format(eer*100))
        self.log('cosine_eer', eer)

    def evaluate(self):
        """Cosine scoring plus PCA/LDA back-ends fitted on dev embeddings."""
        # First extract dev (training-set) embeddings to fit the PCA/LDA
        # transforms.  eval()/no_grad: this is pure inference -- without
        # them autograd state accumulates across the whole pass.
        train_loader = self.train_dataloader()
        dev_vectors = []
        dev_labels = []
        print("extract dev speaker embedding...")
        self.speaker_encoder.eval()
        with torch.no_grad():
            for data, label in tqdm(train_loader):
                embedding = self.extract_speaker_embedding(data.cuda())
                dev_vectors.append(embedding.cpu().detach().numpy())
                dev_labels.append(label.cpu().detach().numpy())

        dev_vectors = np.vstack(dev_vectors).reshape(-1, self.hparams.embedding_dim)
        dev_labels = np.hstack(dev_labels)
        print("dev vectors shape:", dev_vectors.shape)
        print("dev labels shape:", dev_labels.shape)

        index_mapping, eval_vectors = self._extract_eval_vectors()
        print("eval_vectors shape is: ", eval_vectors.shape)

        print("scoring...")
        eer, th = cosine_score(self.trials, index_mapping, eval_vectors)
        print("Cosine EER: {:.2f}%".format(eer*100))

        # PCA back-end: unsupervised projection fitted on dev embeddings.
        for dim in [32, 64, 128, 256]:
            pca = PCA(n_components=dim)
            pca.fit(dev_vectors)
            eval_vectors_trans = pca.transform(eval_vectors)
            eer, th = cosine_score(self.trials, index_mapping, eval_vectors_trans)
            print("PCA {} Cosine EER: {:.3f}%".format(dim, eer*100))

        # LDA back-end: supervised; n_components must stay below
        # min(num_classes - 1, embedding_dim).
        for dim in [32, 49]:
            lda = LDA(n_components=dim)
            lda.fit(dev_vectors, dev_labels)
            eval_vectors_trans = lda.transform(eval_vectors)
            eer, th = cosine_score(self.trials, index_mapping, eval_vectors_trans)
            print("LDA {} Cosine EER: {:.3f}%".format(dim, eer*100))

        # TODO: PLDA back-end not yet implemented.

    def configure_optimizers(self):
        """Adam with a step schedule: LR halved every 10 epochs."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
        return [optimizer], [lr_scheduler]

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach model/data/training CLI options to *parent_parser*."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--batch_size', type=int, default=32)
        parser.add_argument('--num_workers', type=int, default=32)
        parser.add_argument('--save_top_k', type=int, default=5)

        parser.add_argument('--loss_type', type=str, default="softmax")
        parser.add_argument('--nnet_type', type=str, default="ResNetSE34L")

        parser.add_argument('--max_frames', type=int, default=400)
        parser.add_argument('--min_frames', type=int, default=200)
        parser.add_argument('--n_mel', type=int, default=64)

        parser.add_argument('--train_list_path', type=str, default='')
        parser.add_argument('--trials_path', type=str, default='trials.lst')
        parser.add_argument('--test_list_path', type=str, default='')
        parser.add_argument('--musan_list_path', type=str, default='')
        parser.add_argument('--rirs_list_path', type=str, default='')
        parser.add_argument('--nPerSpeaker', type=int, default=1, help='Number of utterances per speaker per batch, only for metric learning based losses')
        parser.add_argument('--max_seg_per_spk', type=int, default=2500, help='Maximum number of utterances per speaker per epoch')

        parser.add_argument('--checkpoint_path', type=str, default=None)

        parser.add_argument('--embedding_save_dir', type=str, default="npy")
        parser.add_argument('--embedding_dim', type=int, default=256)

        parser.add_argument('--learning_rate', type=float, default=0.0001)
        parser.add_argument('--eval_interval', type=int, default=1)

        parser.add_argument('--test', action='store_true', default=False)

        return parser

