from typing import Callable, Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Transformer
from analytics.crop_recognition_training_inference.models.transformer_model import (
    TransformerDecoder_BatchNorm,
    PositionalEncoding,
)


class TransformerRegressionDecoderModel(TransformerDecoder_BatchNorm):
    """Decoder-only transformer for sequence regression.

    Thin wrapper around ``TransformerDecoder_BatchNorm`` that adds a
    weighted-MSE training objective via :meth:`calculate_loss`.
    """

    def __init__(
        self, input_chn, output_chn, d_model, num_layers, n_heads, batch_first=True
    ):
        super().__init__(
            input_chn, output_chn, d_model, num_layers, n_heads, batch_first
        )

    def forward(self, x):
        # Delegates entirely to the base class; kept as a stable entry point.
        return super().forward(x)

    @staticmethod
    def calculate_loss(
        model,
        data_x: torch.FloatTensor,
        data_y: torch.FloatTensor,
        weight: torch.FloatTensor,
        scale,
        reduction="mean",
        min_src: int = 10,
    ):
        """Compute a weighted MSE loss over time steps ``min_src`` onward.

        Runs ``model`` on ``data_x``, discards the first ``min_src`` time
        steps, and computes an element-wise MSE weighted by ``weight``.
        When ``model.training`` is True, ``backward()`` is called here, so
        the caller only needs to step the optimizer afterwards.

        Parameters
        ----------
        model : nn.Module
            Model mapping (N, T, C_in) -> (N, T, C_out).
        data_x : torch.FloatTensor
            Input sequences, (N, T, C_in).
        data_y : torch.FloatTensor
            Regression targets, (N, T, C_out). Must be a float tensor:
            it is passed directly to ``F.mse_loss``.
        weight : torch.FloatTensor
            Per-time-step weights, (N, T, 1).
        scale : float
            Multiplier applied to the mean-reduced loss.
        reduction : str
            "mean" (weighted mean, scaled) or "sum" (weighted sum;
            NOTE(review): ``scale`` is NOT applied in the "sum" branch —
            confirm this asymmetry is intended).
        min_src : int
            Number of leading time steps excluded from the loss.

        Returns
        -------
        float
            Detached scalar loss value.
        """
        assert reduction in {"sum", "mean"}
        prediction = model(data_x)  # (N, T, C_out)
        label = data_y  # (N, T, C_out)

        # Drop the warm-up prefix: early steps have too little context.
        prediction = prediction[:, min_src:]
        label = label[:, min_src:]
        weight = weight[:, min_src:]

        class_loss = F.mse_loss(prediction, label, reduction="none")
        class_loss = torch.sum(class_loss * weight)
        if reduction == "mean":
            # Normalize by total weight so the result is a weighted average.
            class_loss = class_loss * scale / torch.sum(weight)
        if model.training:
            class_loss.backward()

        return class_loss.item()


class TransformerRegressionModel(Transformer):
    """Encoder-decoder transformer for sequence-to-one regression.

    Projects ``c_in`` input features to ``d_model``, runs the standard
    ``nn.Transformer`` (always batch-first) with a single all-zero target
    token, and projects the decoder output back to ``c_out``.
    """

    def __init__(
        self,
        c_in: int,
        c_out: int,
        d_model: int = 512,
        nhead: int = 8,
        num_encoder_layers: int = 6,
        num_decoder_layers: int = 6,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        custom_encoder=None,
        custom_decoder=None,
        layer_norm_eps: float = 0.00001,
        norm_first: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        # Forward by keyword: nn.Transformer gained a ``bias`` parameter
        # (torch >= 2.1) between ``norm_first`` and ``device``, so purely
        # positional forwarding would silently pass ``device`` as ``bias``.
        super().__init__(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation=activation,
            custom_encoder=custom_encoder,
            custom_decoder=custom_decoder,
            layer_norm_eps=layer_norm_eps,
            batch_first=True,  # this model always operates batch-first
            norm_first=norm_first,
            device=device,
            dtype=dtype,
        )
        self.c_in = c_in
        self.c_out = c_out
        self.input_linear = nn.Linear(c_in, d_model)
        self.output_linear = nn.Linear(d_model, c_out)
        # NOTE(review): positional encoding is constructed but its use in
        # forward() is commented out — confirm whether it should be applied.
        self.pos_encoding = PositionalEncoding(d_model=d_model, max_len=1000)

    def forward(
        self,
        src: torch.FloatTensor,
        weight: Optional[torch.FloatTensor] = None,
    ):
        """Run the transformer with a single zero-initialized target token.

        Parameters
        ----------
        src : torch.FloatTensor
            Input sequences, (N, T, C_in).
        weight : torch.FloatTensor, optional
            (N, T, 1); accepted for interface parity but currently unused.

        Returns
        -------
        torch.FloatTensor
            Decoder output projected to (N, 1, C_out).
        """
        # A single all-zero "query" token is decoded against the full source.
        tgt = torch.zeros((src.size(0), 1, self.c_in)).to(src.device)
        src = self.input_linear(src)
        # src = self.pos_encoding(src)
        tgt = self.input_linear(tgt)
        # tgt = self.pos_encoding(tgt)

        # All-zero (non-masking) attention masks: full visibility.
        src_mask = torch.zeros((src.size(1), src.size(1))).to(src.device)
        tgt_mask = torch.zeros((1, 1)).to(src.device)
        mem_mask = None
        src_pad_mask = None
        tgt_pad_mask = None
        mem_pad_mask = None

        out = super().forward(
            src,
            tgt,
            src_mask=src_mask,
            tgt_mask=tgt_mask,
            memory_mask=mem_mask,
            src_key_padding_mask=src_pad_mask,
            tgt_key_padding_mask=tgt_pad_mask,
            memory_key_padding_mask=mem_pad_mask,
        )
        out = self.output_linear(out)
        return out

    @staticmethod
    def calculate_loss(
        model: nn.Module,
        data_x: torch.FloatTensor,
        data_y: torch.FloatTensor,
        weight: torch.FloatTensor,
        scale: float,
        reduction=None,
        min_src: int = 10,
    ):
        """Accumulate weighted MSE over per-step rolling predictions.

        For each time step ``end_idx`` from ``min_src`` to T-1, feeds the
        prefix ``data_x[:, :end_idx]`` to ``model`` and compares its single
        output token against ``data_y[:, end_idx-1]`` (teacher forcing over
        growing prefixes). When ``model.training`` is True, ``backward()``
        is called per step so gradients accumulate across steps.

        Parameters
        ----------
        model : nn.Module
            Model mapping (N, T_x, C_in) -> (N, 1, C_out).
        data_x : torch.FloatTensor
            Input sequences, (N, T, C_in).
        data_y : torch.FloatTensor
            Regression targets, (N, T, C_out).
        weight : torch.FloatTensor
            Per-time-step weights, (N, T, 1).
        scale : float
            Multiplier applied to each per-step loss.
        reduction : optional
            Ignored; kept for signature parity with
            ``TransformerRegressionDecoderModel.calculate_loss``.
        min_src : int
            Shortest prefix length used for prediction.

        Returns
        -------
        float
            Sum of the detached per-step loss values.
        """
        loss_val = 0
        for end_idx in range(min_src, data_x.size(1)):
            cur_in = data_x[:, :end_idx]  # (N, T_x, C_in)
            cur_out = data_y[:, end_idx - 1 : end_idx]  # (N, 1, C_out)
            cur_weight = weight[:, end_idx - 1 : end_idx]  # (N, 1, 1)
            model_out = model(cur_in)  # (N, 1, C_out)

            loss = F.mse_loss(model_out, cur_out, reduction="none")
            loss = loss * cur_weight
            # Normalize by the TOTAL weight (not the per-step weight) so the
            # per-step contributions sum to a weighted average over steps.
            loss = torch.sum(loss) * scale / torch.sum(weight)
            loss_val += loss.item()
            if model.training:
                loss.backward()

        return loss_val


if __name__ == "__main__":
    # Smoke test / micro-benchmark for the decoder regression model.
    import time

    # Fall back to CPU when CUDA is unavailable so the script still runs.
    device = torch.device(1) if torch.cuda.is_available() else torch.device("cpu")
    test_model = TransformerRegressionDecoderModel(10, 10, 512, 12, 32, True).to(device)
    test_x = torch.rand((100, 45, 10)).to(device)
    test_w = torch.rand((100, 45, 1)).to(device)

    for _ in range(5):
        ts = time.time()
        # calculate_loss returns a detached Python float and already calls
        # backward() internally while the model is in training mode, so no
        # extra backward() here (a float has no .backward()).
        loss_val: float = test_model.calculate_loss(
            test_model, test_x, test_x, test_w, 1
        )

        print(loss_val, time.time() - ts)