from argparse import ArgumentParser
from copy import deepcopy
from typing import Any, Union

import torch
import torch.nn as nn
import numpy as np

from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.nn import functional as F
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from torch.optim.lr_scheduler import StepLR

from module.feature import Mel_Spectrogram
from module.loader import SPK_datamodule
import torch_speaker.score as score

from pl_bolts.callbacks.byol_updates import BYOLMAWeightUpdate
from pl_bolts.models.self_supervised.byol.models import SiameseArm
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR


def off_diagonal(x):
    """Return a 1-D tensor of every off-diagonal entry of a square matrix.

    Elements are returned in row-major order (row 0 minus its diagonal
    entry, then row 1, ...), matching a flattened read of the matrix with
    the diagonal removed.
    """
    rows, cols = x.shape
    assert rows == cols
    # Boolean-mask indexing selects entries in row-major order; building the
    # mask on x's device keeps this correct for GPU tensors.
    keep = ~torch.eye(rows, dtype=torch.bool, device=x.device)
    return x[keep].flatten()


class BarlowTwins(LightningModule):
    """Barlow Twins self-supervised speaker-embedding model.

    Two augmented views of the same utterance are mapped to mel
    spectrograms and embedded with a shared ResNet encoder; training
    minimizes the Barlow Twins redundancy-reduction loss between the two
    embedding batches. Validation scores speaker-verification trials with
    cosine similarity and reports EER/minDCF.
    """

    def __init__(
        self,
        learning_rate: float = 0.2,
        weight_decay: float = 1.5e-6,
        input_height: int = 32,
        batch_size: int = 32,
        num_workers: int = 0,
        warmup_epochs: int = 10,
        max_epochs: int = 1000,
        trial_path: str = "data/vox1_clean.txt",
        encoder_out_dim: int = 256,
        projector_hidden_size: int = 4096,
        projector_out_dim: int = 256,
        **kwargs
    ):
        """
        Args:
            learning_rate: the learning rate
            weight_decay: optimizer weight decay
            input_height: image input height
            batch_size: the batch size
            num_workers: number of workers
            warmup_epochs: num of epochs for scheduler warm up
            max_epochs: max epochs for scheduler
            trial_path: path to the speaker-verification trial list
            encoder_out_dim: output dimension of base_encoder
            projector_hidden_size: hidden layer size of projector MLP
            projector_out_dim: output size of projector MLP
            **kwargs: extra CLI hparams (expects at least ``encoder_name``,
                ``embedding_dim``, ``lambd``, ``score_save_path``), all
                captured by ``save_hyperparameters``.
        """
        super().__init__()
        self.save_hyperparameters(ignore="base_encoder")

        self.mel_trans = Mel_Spectrogram()

        # Local import keeps the optional resnet module off the import path
        # for users of this file that never build the model.
        from module.resnet import resnet34, resnet18

        print("init {}".format(self.hparams.encoder_name))
        if self.hparams.encoder_name == "resnet34":
            self.encoder = resnet34(embedding_dim=self.hparams.embedding_dim)
        else:
            # Any other value falls back to resnet18.
            self.encoder = resnet18(embedding_dim=self.hparams.embedding_dim)

        # NOTE(review): unused by the SSL objective below; kept so existing
        # checkpoints/state_dicts stay loadable.
        self.criterion = torch.nn.CrossEntropyLoss()

        # Affine calibration constants for similarity scores; presumably used
        # by an external scoring path -- not referenced in this class.
        self.w = 10
        self.b = -5

        # Trial list: rows of (label, enroll_path, test_path) as strings.
        self.trials = np.loadtxt(self.hparams.trial_path, str)
        print("init lambd {:.4f}".format(self.hparams.lambd))

    def training_step(self, batch, batch_idx):
        """Compute the Barlow Twins loss for one batch of paired views.

        Fixes the original implementation, which logged and returned an
        undefined ``loss`` (NameError on the first training batch).
        """
        waveform_1, waveform_2, label = batch  # label is unused by the SSL loss
        img_1 = self.mel_trans(waveform_1)
        img_2 = self.mel_trans(waveform_2)

        anchor = self.encoder(img_1)
        positive = self.encoder(img_2)

        # Standardize each embedding dimension across the batch (zero mean,
        # unit variance), playing the role of the affine-free BatchNorm in
        # the reference Barlow Twins implementation.
        z1 = (anchor - anchor.mean(dim=0)) / (anchor.std(dim=0) + 1e-9)
        z2 = (positive - positive.mean(dim=0)) / (positive.std(dim=0) + 1e-9)

        # Empirical cross-correlation matrix between the two views.
        c = z1.T @ z2 / z1.size(0)

        # Invariance term pulls the diagonal towards 1; the redundancy term,
        # weighted by lambd, pushes off-diagonal correlations towards 0.
        on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
        off_diag = off_diagonal(c).pow_(2).sum()
        loss = on_diag + self.hparams.lambd * off_diag

        # log results
        self.log('train_loss', loss, prog_bar=True)

        return loss

    def on_validation_epoch_start(self):
        """Reset the per-epoch buffers used to collect eval embeddings."""
        self.index_mapping = {}  # utterance path -> row index in eval_vectors
        self.eval_vectors = []

    def validation_step(self, batch, batch_idx):
        """Embed one eval utterance and stash its L2-normalized vector."""
        x, path = batch
        path = path[0]  # batch size is 1 at eval time; unwrap the path
        x = self.mel_trans(x)
        x = self.encoder(x)
        x = nn.functional.normalize(x, dim=1)  # l2 norm for a batch vector
        x = x.detach().cpu().numpy()[0]
        self.eval_vectors.append(x)
        self.index_mapping[path] = batch_idx

    def validation_epoch_end(self, outputs):
        """Score the trial list with cosine similarity; log EER and minDCF.

        NOTE(review): this is the pre-2.0 Lightning hook name -- confirm the
        pinned pytorch_lightning version still calls it.
        """
        labels, scores = score.cosine_score(
            self.trials, self.index_mapping, self.eval_vectors)
        EER, threshold = score.compute_eer(labels, scores)

        print("\ncosine EER: {:.2f}% with threshold {:.2f}".format(EER*100, threshold))
        self.log("cosine_eer", EER*100)

        minDCF, threshold = score.compute_minDCF(labels, scores, p_target=0.01)
        print("cosine minDCF(10-2): {:.2f} with threshold {:.2f}".format(minDCF, threshold))
        self.log("cosine_minDCF(10-2)", minDCF)

        minDCF, threshold = score.compute_minDCF(labels, scores, p_target=0.001)
        print("cosine minDCF(10-3): {:.2f} with threshold {:.2f}".format(minDCF, threshold))
        self.log("cosine_minDCF(10-3)", minDCF)

        # Optionally dump raw (label, score) pairs for external calibration.
        if self.hparams.score_save_path is not None:
            with open(self.hparams.score_save_path, "w") as f:
                for i in range(len(labels)):
                    f.write("{} {}\n".format(labels[i], scores[i]))

    def configure_optimizers(self):
        """Adam with a per-epoch exponential decay (gamma=0.8) schedule."""
        print("init learning rate {:.4f}".format(self.hparams.learning_rate))
        optimizer = torch.optim.Adam(
            self.parameters(),
            self.hparams.learning_rate,
            weight_decay=self.hparams.weight_decay
        )
        scheduler = StepLR(optimizer, step_size=1, gamma=0.8)
        return [optimizer], [scheduler]

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Append model hyperparameter flags to an existing ArgumentParser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        (args, _) = parser.parse_known_args()

        # Data
        parser.add_argument("--num_workers", default=80, type=int)
        parser.add_argument("--embedding_dim", default=128, type=int)

        parser.add_argument("--batch_size", type=int, default=84)
        parser.add_argument("--learning_rate", type=float, default=0.01)
        parser.add_argument("--warmup_epochs", type=float, default=10)
        parser.add_argument('--weight_decay', type=float, default=0)

        # Barlow Twins redundancy-reduction weight.
        parser.add_argument('--lambd', type=float, default=0.0051)
        parser.add_argument("--score_save_path", type=str, default="score.txt")

        parser.add_argument("--save_dir", type=str, default=None)
        parser.add_argument("--encoder_name", type=str, default="resnet18")
        parser.add_argument("--checkpoint_path", type=str, default=None)
        parser.add_argument("--train_csv_path", type=str, default="data/train.csv")
        parser.add_argument('--projector', default='1024-1024-1024', type=str,
                metavar='MLP', help='projector MLP')

        return parser


def cli_main():
    """CLI entry point: parse args, build model + datamodule, and train.

    NOTE(review): ``Trainer.add_argparse_args``, ``gpus`` and
    ``distributed_backend='dp'`` are pre-2.0 pytorch_lightning APIs --
    confirm the pinned Lightning version before upgrading.
    """
    parser = ArgumentParser()
    # trainer args
    parser = Trainer.add_argparse_args(parser)

    # model args
    parser = BarlowTwins.add_model_specific_args(parser)
    args = parser.parse_args()

    model = BarlowTwins(**args.__dict__)

    # Optional warm start; strict=False tolerates missing/unexpected keys
    # (e.g. a checkpoint saved with a different projector head).
    if args.checkpoint_path is not None:
        state_dict = torch.load(args.checkpoint_path, map_location="cpu")["state_dict"]
        model.load_state_dict(state_dict, strict=False)
        print("load weight from {}".format(args.checkpoint_path))

    # --save_dir is mandatory: checkpoints and logs are written under it.
    assert args.save_dir is not None
    # Keep up to 100 checkpoints ranked by the logged cosine EER.
    checkpoint_callback = ModelCheckpoint(monitor='cosine_eer', save_top_k=100,
           filename="{epoch}_{cosine_eer:.2f}", dirpath=args.save_dir)
    lr_monitor = LearningRateMonitor(logging_interval='step')

    # init default datamodule; pairs=True yields two augmented views per
    # utterance, as required by training_step.
    dm = SPK_datamodule(train_csv_path=args.train_csv_path, 
            trial_path="data/vox1_clean.txt",
            aug=True, batch_size=args.batch_size, num_workers=args.num_workers, pairs=True)

    # num_sanity_val_steps=-1 runs a full validation pass before training,
    # so an initial EER is reported at epoch 0.
    trainer = Trainer(
            max_epochs=args.max_epochs,
            gpus=args.gpus,
            distributed_backend='dp',
            num_sanity_val_steps=-1,
            callbacks=[checkpoint_callback, lr_monitor],
            default_root_dir=args.save_dir
            )
    trainer.fit(model, datamodule=dm)


if __name__ == "__main__":
    # Run training only when executed as a script, not on import.
    cli_main()
