from argparse import ArgumentParser
from copy import deepcopy
from typing import Any, Union

import torch
import torch.nn as nn
import numpy as np

from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.nn import functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import StepLR

from module.feature import Mel_Spectrogram
from module.loader import SPK_datamodule
import torch_speaker.score as score
from loss import softmax, amsoftmax

class Task(LightningModule):
    """Speaker-verification training task.

    Trains a ResNet speaker-embedding encoder on mel-spectrogram features with
    a classification loss (softmax / AM-softmax), and evaluates by cosine
    scoring embeddings against a trial list (EER and minDCF).
    """

    def __init__(
        self,
        learning_rate: float = 0.2,
        weight_decay: float = 1.5e-6,
        input_height: int = 32,
        batch_size: int = 32,
        num_workers: int = 10,
        warmup_epochs: int = 10,
        max_epochs: int = 1000,
        trial_path: str = "data/vox1_clean.txt",
        encoder_out_dim: int = 256,
        projector_hidden_size: int = 4096,
        projector_out_dim: int = 256,
        **kwargs
    ):
        """
        Args:
            learning_rate: optimizer learning rate
            weight_decay: optimizer weight decay
            input_height: image input height (unused here; kept for hparam compatibility)
            batch_size: the batch size (consumed by the datamodule via hparams)
            num_workers: number of dataloader workers
            warmup_epochs: num of epochs for scheduler warm up (unused by StepLR below)
            max_epochs: max epochs for scheduler
            trial_path: text file of evaluation trials, loaded as strings
            encoder_out_dim: output dimension of base_encoder
            projector_hidden_size: hidden layer size of projector MLP
            projector_out_dim: output size of projector MLP
            **kwargs: remaining CLI hyperparameters (embedding_dim, num_classes,
                encoder_name, loss_name, ...) captured by save_hyperparameters()
        """
        super().__init__()
        self.save_hyperparameters(ignore="base_encoder")
        # Trial rows are kept as raw strings; score.cosine_score parses them later.
        self.trials = np.loadtxt(self.hparams.trial_path, str)

        self.mel_trans = Mel_Spectrogram()
        if self.hparams.encoder_name == "resnet34":
            # Local import keeps module.resnet a soft dependency at file-import time.
            from module.resnet import resnet34
            self.encoder = resnet34(embedding_dim=self.hparams.embedding_dim)
        else:
            # FIX: resnet18 was used without ever being imported (NameError on
            # any encoder_name other than "resnet34").
            # NOTE(review): assumes module.resnet exports resnet18 — confirm.
            from module.resnet import resnet18
            self.encoder = resnet18(embedding_dim=self.hparams.embedding_dim)

        if self.hparams.loss_name == "amsoftmax":
            self.loss_fun = amsoftmax(
                embedding_dim=self.hparams.embedding_dim,
                num_classes=self.hparams.num_classes,
            )
        else:
            self.loss_fun = softmax(
                embedding_dim=self.hparams.embedding_dim,
                num_classes=self.hparams.num_classes,
            )

    def training_step(self, batch, batch_idx):
        """One optimization step: waveform -> mel features -> embedding -> loss."""
        waveform, label = batch
        # Random temporal crop (110..299 frames) acts as cheap length augmentation;
        # a fresh length is drawn every step.
        length = np.random.randint(110, 300)
        feature = self.mel_trans(waveform)[:, :, :, :length]
        embedding = self.encoder(feature)
        loss, acc = self.loss_fun(embedding, label)
        self.log('train_loss', loss, prog_bar=True)
        self.log('acc', acc, prog_bar=True)
        return loss

    def on_validation_epoch_start(self):
        """Reset the per-epoch embedding cache used for trial scoring."""
        self.index_mapping = {}
        self.eval_vectors = []

    def validation_step(self, batch, batch_idx):
        """Embed one evaluation utterance and cache it.

        Assumes validation batch size is 1 (indexing [0] below) — the path and
        the embedding both take the first element of the batch.
        """
        x, path = batch
        path = path[0]
        x = self.mel_trans(x)
        x = self.encoder(x)
        x = x.detach().cpu().numpy()[0]
        self.eval_vectors.append(x)
        # Map utterance path -> position in eval_vectors for trial lookup.
        self.index_mapping[path] = batch_idx

    def validation_epoch_end(self, outputs):
        """Score cached embeddings against the trial list and log EER / minDCF.

        Kept as the pre-2.0 Lightning hook name, consistent with the legacy
        Trainer API used elsewhere in this file.
        """
        labels, scores = score.cosine_score(
            self.trials, self.index_mapping, self.eval_vectors)
        EER, threshold = score.compute_eer(labels, scores)

        print("\ncosine EER: {:.2f}% with threshold {:.2f}".format(EER*100, threshold))
        self.log("cosine_eer", EER*100)

        minDCF, threshold = score.compute_minDCF(labels, scores, p_target=0.01)
        print("cosine minDCF(10-2): {:.2f} with threshold {:.2f}".format(minDCF, threshold))
        self.log("cosine_minDCF(10-2)", minDCF)

        minDCF, threshold = score.compute_minDCF(labels, scores, p_target=0.001)
        print("cosine minDCF(10-3): {:.2f} with threshold {:.2f}".format(minDCF, threshold))
        self.log("cosine_minDCF(10-3)", minDCF)

    def configure_optimizers(self):
        """AdamW with a fixed StepLR decay (x0.3 every 5 epochs)."""
        # Use the AdamW name already imported at the top of the file
        # (was torch.optim.AdamW — same class, inconsistent spelling).
        optimizer = AdamW(
            self.parameters(),
            lr=self.hparams.learning_rate,
            weight_decay=self.hparams.weight_decay,
        )
        scheduler = StepLR(optimizer, step_size=5, gamma=0.3)
        return [optimizer], [scheduler]

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Append model/task CLI arguments to *parent_parser* and return it."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        # (removed a parse_known_args() call whose result was never used)

        parser.add_argument("--num_workers", default=80, type=int)
        parser.add_argument("--embedding_dim", default=128, type=int)

        parser.add_argument("--batch_size", type=int, default=128)
        parser.add_argument("--learning_rate", type=float, default=0.01)
        parser.add_argument("--warmup_epochs", type=float, default=10)
        parser.add_argument('--momentum', type=float, default=0.9)
        parser.add_argument('--weight_decay', type=float, default=0)

        parser.add_argument("--num_classes", type=int, default=5994)
        parser.add_argument("--save_dir", type=str, default=None)
        parser.add_argument("--checkpoint_path", type=str, default=None)
        parser.add_argument("--loss_name", type=str, default=None)
        parser.add_argument("--encoder_name", type=str, default=None)
        parser.add_argument("--train_csv_path", type=str, default="data/train.csv")

        return parser


def cli_main():
    """CLI entry point: parse args, build the Task and datamodule, run training."""
    parser = ArgumentParser()
    # trainer args
    parser = Trainer.add_argparse_args(parser)

    # model args
    parser = Task.add_model_specific_args(parser)
    args = parser.parse_args()

    # FIX: was `assert args.save_dir is not None` — asserts are stripped under
    # `python -O`, so validate explicitly; also check before building the model
    # to fail fast.
    if args.save_dir is None:
        raise ValueError("--save_dir is required")

    model = Task(**vars(args))

    # Optionally warm-start from a checkpoint; strict=False tolerates
    # missing/extra keys (e.g. a different loss head).
    if args.checkpoint_path is not None:
        state_dict = torch.load(args.checkpoint_path, map_location="cpu")["state_dict"]
        model.load_state_dict(state_dict, strict=False)
        print("load weight from {}".format(args.checkpoint_path))

    checkpoint_callback = ModelCheckpoint(monitor='cosine_eer', save_top_k=100,
           filename="{epoch}_{cosine_eer:.2f}", dirpath=args.save_dir)
    lr_monitor = LearningRateMonitor(logging_interval='step')

    # init default datamodule
    dm = SPK_datamodule(train_csv_path=args.train_csv_path, trial_path="data/vox1_clean.txt",
            aug=False, batch_size=args.batch_size, num_workers=args.num_workers, pairs=False)

    # num_sanity_val_steps=-1 runs a full validation pass before training so
    # the checkpoint monitor 'cosine_eer' exists from the start.
    trainer = Trainer(
            max_epochs=args.max_epochs,
            gpus=args.gpus,
            distributed_backend='dp',
            num_sanity_val_steps=-1,
            callbacks=[checkpoint_callback, lr_monitor],
            reload_dataloaders_every_epoch=True,
            default_root_dir=args.save_dir
            )
    trainer.fit(model, datamodule=dm)


# Script entry point: launch training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    cli_main()
