import sys
from hashlib import md5 as hashlib_md5
from typing import Optional, Any

import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import nn
from torch.optim import AdamW, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau

from config.config import Config
from models.common import Classifier, SimCSESentenceEmbeddings, BertSentenceEmbeddings, SBertSentenceEmbeddings


class ImcsDacModule(LightningModule):
    """Dialogue-act classification module for the IMCS dataset.

    Combines a sentence-embedding backbone (SimCSE / SBERT / plain BERT,
    selected by substring match on ``config.pretrained_model``) with a
    speaker-aware classifier head, trained with weighted, label-smoothed
    cross-entropy.
    """

    def __init__(self, config: Config):
        super().__init__()

        self.config: Config = config

        # Choose the embedding backbone from the pretrained-model name.
        model_name = config.pretrained_model.lower()
        if "simcse" in model_name:
            self.sent_embeddings = SimCSESentenceEmbeddings(config)
        elif "sbert" in model_name:
            self.sent_embeddings = SBertSentenceEmbeddings(config)
        else:
            self.sent_embeddings = BertSentenceEmbeddings(config)

        self.classifier = Classifier(config)

        self.criterion = nn.CrossEntropyLoss(weight=config.label_weights, label_smoothing=config.label_smoothing)

    def hash(self) -> str:
        """
        Return an MD5 hex digest of the module's repr, used to build the
        model save path (two runs with identical architecture hash equally).
        """
        return hashlib_md5(self.__repr__().encode('utf8')).hexdigest()

    def forward(self, input: dict):
        """
        Compute classification logits for a batch.

        :param input: batch dict; must contain ``'speaker_ids'`` plus whatever
            keys the embedding backbone expects (token ids, masks, ...).
        :return: logits tensor, one row per sentence.
        """
        speaker_ids = input['speaker_ids']

        embedded_sent = self.sent_embeddings(input)
        logits = self.classifier(embedded_sent, speaker_ids)
        return logits

    @property
    def current_lr(self) -> float:
        """Learning rate of the first param group of the first optimizer."""
        return self.trainer.optimizers[0].param_groups[0]["lr"]

    def set_lr(self, lr: float) -> None:
        """Force *lr* onto every param group of every optimizer."""
        for optimizer in self.trainer.optimizers:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    def on_train_start(self) -> None:
        # Record the full config with the experiment logger.
        self.logger.log_hyperparams(self.config.__dict__)

    def on_train_epoch_end(self) -> None:
        # Newline so the per-epoch progress bar does not overwrite itself.
        sys.stdout.write("\n")

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        loss, acc = self.internal_step(batch)

        self.log_dict({'loss': loss, 'acc': acc, 'lr': self.current_lr}, prog_bar=True, on_step=True, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx) -> Optional[STEP_OUTPUT]:
        loss, acc = self.internal_step(batch)

        self.log_dict({'val_loss': loss, 'val_acc': acc}, prog_bar=True, on_step=False, on_epoch=True)
        return None

    def internal_step(self, batch):
        """
        Shared forward/loss/accuracy computation for train and validation.

        :param batch: batch dict containing ``'label_ids'`` and the keys
            required by :meth:`forward`.
        :return: ``(loss, acc)`` tensors.
        """
        logits = self.forward(batch)
        label_ids = batch['label_ids']

        loss = self.criterion(logits, label_ids)
        predicts = torch.argmax(logits, dim=1)

        # Fraction of correct predictions in this batch.
        acc = (predicts == label_ids).float().mean()

        return loss, acc

    def predict_step(self, batch, batch_index, dataloader_index=0) -> Optional[STEP_OUTPUT]:
        """Return the argmax label id for each sample in *batch*."""
        logits = self.forward(batch)
        predicts = torch.argmax(logits, dim=1)

        return predicts

    def configure_optimizers(self) -> Any:
        """
        Build the optimizer (AdamW or SGD per ``config.optimizer``) and a
        ``ReduceLROnPlateau`` scheduler monitoring the epoch-level train loss.

        :raises ValueError: if ``config.optimizer`` is not 'adam' or 'sgd'.
        """
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

        def _param_groups(module: nn.Module):
            # One group per parameter; bias/LayerNorm params get no weight decay.
            for name, param in module.named_parameters():
                weight_decay = 0.0 if any(nd in name for nd in no_decay) else self.config.weight_decay
                yield {'params': param, 'lr': self.config.learning_rate, 'weight_decay': weight_decay}

        # NOTE: separate learning rates for sent_embeddings and classifier were
        # tried but performed worse, so both use the same rate.
        optimizer_grouped_parameters = [
            *_param_groups(self.sent_embeddings),
            *_param_groups(self.classifier),
        ]

        if self.config.optimizer == 'adam':
            optimizer = AdamW(optimizer_grouped_parameters, lr=self.config.learning_rate)
        elif self.config.optimizer == 'sgd':
            optimizer = SGD(optimizer_grouped_parameters, lr=self.config.learning_rate, momentum=0.9)
        else:
            # Previously an unknown value fell through to an UnboundLocalError;
            # fail fast with an explicit message instead.
            raise ValueError(f"Unsupported optimizer {self.config.optimizer!r}; expected 'adam' or 'sgd'")

        scheduler = ReduceLROnPlateau(optimizer,
                                      patience=self.config.scheduler_patience,
                                      factor=self.config.scheduler_factor,
                                      min_lr=self.config.scheduler_min_lr,
                                      threshold=self.config.scheduler_threshold,
                                      threshold_mode=self.config.scheduler_threshold_mode)

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "epoch",
                "frequency": 1,
                "monitor": "loss_epoch",
                "strict": True,
                "name": None,
            }
        }
