from typing import Any, Dict, Tuple
import pandas as pd
from scipy.stats import spearmanr
import json
import copy
import logging

import torch
from torch.nn import functional as F
from transformers import AutoTokenizer

from lightning import LightningModule
from torchmetrics import MinMetric, MeanMetric

from src.models.components.prosst_components.modeling_prosst import ProSSTForMaskedLM
from transformers import AutoConfig

# Module-level logger, following the one-logger-per-module convention.
LOG = logging.getLogger(__name__)


class DynamicLanguageModelLitModule(LightningModule):
    """Fine-tune a ProSST masked language model with an extra dynamics channel.

    A frozen copy of the pretrained ProSST model acts as a teacher: the
    student is trained with a BERT-style masked-language-modeling loss plus a
    KL-divergence distillation loss against the teacher's predictions (the
    teacher receives no dynamics input). Validation and test perform
    zero-shot mutation-effect prediction and report the Spearman correlation
    with experimental DMS scores.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        scheduler: torch.optim.lr_scheduler,
        compile: bool,
        ckpt_path: str,
        pretrained_path: str = "/home/zhangqiang/userdata/wangzeyuan/Dynamic/Dynamic.Training/data/checkpoints/AI4Protein/ProSST-2048",
    ) -> None:
        """Initialize a `DynamicLanguageModelLitModule`.

        :param optimizer: Partial/factory for the optimizer, called in
            ``configure_optimizers``.
        :param scheduler: Partial/factory for the LR scheduler, or ``None``.
        :param compile: If ``True``, ``torch.compile`` the student model in
            ``setup`` at the start of fitting.
        :param ckpt_path: Path to a Lightning checkpoint to restore.
            NOTE(review): currently unused — loading code was disabled.
        :param pretrained_path: Local ProSST checkpoint used for the
            tokenizer and for both the student and teacher weights.
        """
        super().__init__()

        # Makes init params accessible via `self.hparams` and stores them in
        # checkpoints.
        self.save_hyperparameters(logger=False)

        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path, trust_remote_code=True)

        # Student: trainable copy of the pretrained model.
        self.model = ProSSTForMaskedLM.from_pretrained(pretrained_path)

        # Teacher: frozen copy used only to produce distillation targets.
        self.teacher_model = ProSSTForMaskedLM.from_pretrained(pretrained_path)
        for param in self.teacher_model.parameters():
            param.requires_grad_(False)  # freeze parameters
        self.teacher_model.eval()

        # BERT-style MLM corruption scheme: 15% of tokens selected; of those,
        # 80% -> [MASK], 10% -> random token, 10% -> kept unchanged.
        self.mlm_probability = 0.15
        self.mask_replace_prob = 0.8
        self.random_replace_prob = 0.1
        self.generator = torch.Generator()
        # CrossEntropyLoss ignores label index -100 by default, which marks
        # the unmasked positions below.
        self.criterion = torch.nn.CrossEntropyLoss()
        # KLDivLoss expects log-probabilities as input and probabilities as
        # target when log_target is False (the default).
        self.teacher_criterion = torch.nn.KLDivLoss(reduction='batchmean')

        # Metrics averaged across batches.
        self.train_loss = MeanMetric()
        self.val_spearman = MeanMetric()
        self.test_spearman = MeanMetric()

    def forward(self, input_ids, attention_mask, structure_ids, dynamic_ids) -> torch.Tensor:
        """Run the student model."""
        return self.model(input_ids=input_ids, attention_mask=attention_mask, ss_input_ids=structure_ids, dy_input_ids=dynamic_ids)

    def teacher_forward(self, input_ids, attention_mask, structure_ids, dynamic_ids) -> torch.Tensor:
        """Run the frozen teacher model."""
        return self.teacher_model(input_ids=input_ids, attention_mask=attention_mask, ss_input_ids=structure_ids, dy_input_ids=dynamic_ids)

    def _mask_tokens(self, input_ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply BERT-style MLM corruption to `input_ids` (modified in place).

        :param input_ids: Token id tensor of shape (batch, seq_len).
        :return: ``(input_ids, labels)`` where labels are -100 everywhere
            except the masked positions, which keep the original token id.
        """
        labels = input_ids.clone()
        # Sampling masks are built on CPU (the generator is a CPU generator);
        # indexing a CUDA tensor with a CPU boolean mask is supported.
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = torch.tensor(
            [
                self.tokenizer.get_special_tokens_mask(row, already_has_special_tokens=True)
                for row in labels.tolist()
            ],
            dtype=torch.bool,
        )
        # Never mask special tokens (BOS/EOS/pad).
        probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()
        labels[~masked_indices] = -100  # loss is computed only on masked positions

        # 80% of the masked positions become [MASK].
        indices_replaced = (
            torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()
            & masked_indices
        )
        input_ids[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the masked positions (i.e. half of the remaining 20%) become
        # a random vocabulary token; the rest stay unchanged.
        remaining_prob = 1 - self.mask_replace_prob
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob
        indices_random = (
            torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool()
            & masked_indices
            & ~indices_replaced
        )
        random_words = torch.randint(
            len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator
        ).to(self.device)
        input_ids[indices_random] = random_words[indices_random]
        return input_ids, labels

    def _build_structure_ids(self, input_ids: torch.Tensor, structures) -> torch.Tensor:
        """Build per-residue structure token ids aligned with `input_ids`.

        Structure tokens are shifted by +3 to make room for the special ids
        (0 = pad, 1 = BOS, 2 = EOS).
        """
        structure_ids = torch.zeros(input_ids.size(), dtype=torch.long).to(self.device)
        for idx, structure in enumerate(structures):
            structure_ids[idx, 0] = 1  # BOS
            structure_ids[idx, 1:len(structure) + 1] = torch.tensor(
                [i + 3 for i in structure], dtype=torch.long
            ).to(self.device)
            structure_ids[idx, len(structure) + 1] = 2  # EOS
        return structure_ids

    def _build_dynamic_ids(self, input_ids: torch.Tensor, dynamics) -> torch.Tensor:
        """Build pairwise dynamics id matrices, shape (batch, seq, seq).

        The diagonal is removed (subtracting the identity) and all entries
        are shifted by +1 so that 0 remains the padding id.
        """
        dynamic_ids = torch.zeros(
            (input_ids.size(0), input_ids.size(1), input_ids.size(1)), dtype=torch.long
        ).to(self.device)
        for idx, dynamic in enumerate(dynamics):
            n = len(dynamic)
            dynamic_ids[idx][1:n + 1, 1:n + 1] = (
                torch.tensor(dynamic) - torch.eye(n, dtype=torch.long)
            ) + 1
        return dynamic_ids

    def training_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int
    ) -> torch.Tensor:
        """Perform a single training step.

        :param batch: A list of dicts, each with 'sequence', 'structure' and
            'dynamic' keys.
        :param batch_idx: The index of the current batch.
        :return: The combined MLM + distillation loss.
        """
        sequences, structures, dynamics = zip(
            *[(item['sequence'], item['structure'], item['dynamic']) for item in batch]
        )
        tokenized = self.tokenizer(list(sequences), return_tensors='pt', padding=True, truncation=True)
        input_ids = tokenized['input_ids'].to(self.device)
        attention_mask = tokenized['attention_mask'].to(self.device)

        input_ids, labels = self._mask_tokens(input_ids)
        structure_ids = self._build_structure_ids(input_ids, structures)
        dynamic_ids = self._build_dynamic_ids(input_ids, dynamics)

        # The teacher sees the same corrupted inputs but no dynamics channel.
        with torch.no_grad():
            teacher_outputs = self.teacher_forward(input_ids=input_ids, attention_mask=attention_mask, structure_ids=structure_ids, dynamic_ids=None)
            teacher_logits = torch.softmax(teacher_outputs.logits, dim=-1).squeeze()

        outputs = self.forward(input_ids=input_ids, attention_mask=attention_mask, structure_ids=structure_ids, dynamic_ids=dynamic_ids)
        logits = torch.log_softmax(outputs.logits, dim=-1).squeeze()

        # NOTE(review): the (4 ** 2) factor looks like a distillation
        # temperature T=4 squared, but the logits are never divided by T
        # anywhere — confirm this scaling is intentional.
        teacher_loss = self.teacher_criterion(
            logits.view(-1, logits.size(-1)), teacher_logits.view(-1, logits.size(-1))
        ) * (4 ** 2)
        # CrossEntropyLoss re-applies log_softmax internally; that is harmless
        # here because log_softmax is idempotent, so the loss value is the
        # same as if raw logits were passed.
        student_loss = self.criterion(logits.view(-1, logits.size(-1)), labels.view(-1))

        loss = student_loss + teacher_loss

        # update and log metrics
        self.train_loss(loss)
        self.log("train/loss", self.train_loss, on_step=True, on_epoch=True, prog_bar=True)
        # return loss or backpropagation will fail
        return loss

    def on_train_epoch_end(self) -> None:
        "Lightning hook that is called when a training epoch ends."
        pass

    def _mutant_spearman(self, data_path: str, mutant_path: str) -> float:
        """Score all mutants of one protein and return the Spearman correlation.

        Reads the wild-type sequence/structure/dynamics from `data_path`
        (JSON), scores each mutant in `mutant_path` (CSV with 'mutant' and
        'DMS_score' columns) as the log-likelihood ratio of mutant vs
        wild-type residue, and correlates predictions with DMS scores.
        """
        with open(data_path, "r") as f_in:
            data = json.loads(f_in.read())
        sequence, structure, dynamic = data['sequence'], data['structure'], data['dynamic']

        tokenized = self.tokenizer([sequence], return_tensors='pt')
        input_ids = tokenized['input_ids'].to(self.device)
        attention_mask = tokenized['attention_mask'].to(self.device)

        # Structure tokens shifted by +3 past the special ids, wrapped in
        # BOS (1) / EOS (2) to align with the tokenized sequence.
        structure_offset = [i + 3 for i in structure]
        structure_ids = torch.tensor([1, *structure_offset, 2], dtype=torch.long).unsqueeze(0).to(self.device)

        dynamic_ids = torch.zeros(
            (input_ids.size(0), input_ids.size(1), input_ids.size(1)), dtype=torch.long
        ).to(self.device)
        n = len(dynamic)
        dynamic_ids[0][1:n + 1, 1:n + 1] = (torch.tensor(dynamic) - torch.eye(n, dtype=torch.long)) + 1

        outputs = self.forward(input_ids=input_ids, attention_mask=attention_mask, structure_ids=structure_ids, dynamic_ids=dynamic_ids)
        # Strip BOS/EOS so row i of `logits` corresponds to residue i.
        logits = torch.log_softmax(outputs.logits[:, 1:-1], dim=-1).squeeze()

        df = pd.read_csv(mutant_path)
        vocab = self.tokenizer.get_vocab()
        pred_scores = []
        for mutant in df['mutant'].tolist():
            mutant_score = 0
            # Multi-mutants are colon-separated, e.g. "A12G:T45S"; positions
            # in mutant strings are 1-based.
            for sub_mutant in mutant.split(":"):
                wt, idx, mt = sub_mutant[0], int(sub_mutant[1:-1]) - 1, sub_mutant[-1]
                pred = logits[idx, vocab[mt]] - logits[idx, vocab[wt]]
                mutant_score += pred.item()
            pred_scores.append(mutant_score)

        return spearmanr(pred_scores, df['DMS_score']).statistic

    def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> None:
        """Perform a single validation step: score one DMS dataset.

        :param batch: A ``(data_name, data_path, mutant_path)`` triple.
        :param batch_idx: The index of the current batch.
        """
        data_name, data_path, mutant_path = batch
        spearman = self._mutant_spearman(data_path, mutant_path)
        LOG.info(f"{data_name} spearman: {spearman}")
        self.val_spearman(spearman)

    def on_validation_epoch_end(self) -> None:
        """Log the mean Spearman over all validation datasets and reset."""
        spearman = self.val_spearman.compute()
        self.val_spearman.reset()
        LOG.info(f"Validation Spearman: {spearman}")
        self.log("val/spearman", spearman, on_step=False, on_epoch=True, prog_bar=True, batch_size=1)

    def test_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> None:
        """Perform a single test step: score one DMS dataset.

        :param batch: A ``(data_name, data_path, mutant_path)`` triple.
        :param batch_idx: The index of the current batch.
        """
        data_name, data_path, mutant_path = batch
        # Fix: inputs are now moved to self.device inside the helper; the
        # previous implementation left input_ids/attention_mask/structure_ids
        # on CPU while dynamic_ids was on the accelerator.
        spearman = self._mutant_spearman(data_path, mutant_path)
        self.log("test/spearman", spearman, on_step=False, on_epoch=True, prog_bar=True, batch_size=1)

    def on_test_epoch_end(self) -> None:
        """Lightning hook that is called when a test epoch ends."""
        pass

    def setup(self, stage: str) -> None:
        """Lightning hook that is called at the beginning of fit (train + validate), validate,
        test, or predict.

        This is a good hook when you need to build models dynamically or adjust something about
        them. This hook is called on every process when using DDP.

        :param stage: Either `"fit"`, `"validate"`, `"test"`, or `"predict"`.
        """
        if self.hparams.compile and stage == "fit":
            # Fix: the student model is `self.model`; there is no `self.net`
            # attribute (the old code raised AttributeError when compile=True).
            self.model = torch.compile(self.model)

    def configure_optimizers(self) -> Dict[str, Any]:
        """Choose what optimizers and learning-rate schedulers to use in your optimization.
        Normally you'd need one. But in the case of GANs or similar you might have multiple.

        Examples:
            https://lightning.ai/docs/pytorch/latest/common/lightning_module.html#configure-optimizers

        :return: A dict containing the configured optimizers and learning-rate schedulers to be used for training.
        """
        optimizer = self.hparams.optimizer(params=self.trainer.model.parameters())
        if self.hparams.scheduler is not None:
            scheduler = self.hparams.scheduler(optimizer=optimizer)
            return {
                "optimizer": optimizer,
                "lr_scheduler": {
                    "scheduler": scheduler,
                    "monitor": "train/loss",
                    "interval": "step",
                    "frequency": 1000,
                },
            }
        return {"optimizer": optimizer}


if __name__ == "__main__":
    # This module is import-only; there is no standalone entry point.
    pass