from __future__ import annotations
import torch
import torch.nn as nn
import torch.nn.functional as F
from analytics.crop_recognition.models.transformer_model import (
    CustomizedTransformer,
)

from analytics.crop_recognition.evaluators.evaluator import (
    TimeSequenceClassificationEvaluator,
)

from analytics.crop_recognition.models.base_model import (
    BaseClassificationModel,
)


class CustomizedClassificationTransformerEncoder(BaseClassificationModel):
    """Non-causal (bidirectional) transformer classifier.

    Produces per-timestep class probabilities of shape ``(N, T, C)``.
    Because attention is bidirectional, prefix-causal training/evaluation is
    emulated by re-running the model on each sequence prefix ``src[:, :end]``.
    """

    def __init__(
        self,
        input_chn,
        output_chn,
        d_model=512,
        num_layers=8,
        n_heads=8,
        batch_first=True,
    ) -> None:
        """
        Args:
            input_chn: number of input feature channels per timestep.
            output_chn: number of output classes (C).
            d_model: transformer hidden size.
            num_layers: number of transformer layers.
            n_heads: number of attention heads.
            batch_first: whether tensors are laid out ``(N, T, ...)``.
        """
        super().__init__()

        self.backbone = CustomizedTransformer(
            input_chn,
            output_chn,
            d_model,
            num_layers,
            n_heads,
            is_causal=False,  # encoder: full bidirectional attention
            batch_first=batch_first,
        )
        self.softmax = nn.Softmax(dim=2)  # normalize over classes: (N, T, C)

    def forward(self, src, src_padding_mask):
        """Return per-timestep class probabilities of shape ``(N, T, C)``."""
        logits = self.backbone.forward(src, src_padding_mask)
        return self.softmax(logits)

    @staticmethod
    def calculate_loss(
        model: CustomizedClassificationTransformerEncoder,
        src: torch.FloatTensor,
        src_padding_mask: torch.BoolTensor,
        label: torch.LongTensor,
        label_weight: torch.FloatTensor,
        scale: float = 1,
    ):
        """Weighted NLL loss accumulated over every prefix of the sequence.

        The bidirectional encoder is re-run on ``src[:, :end]`` for every
        prefix length so predictions never attend to future timesteps.
        When ``model.training`` is True, ``backward()`` is called once per
        prefix so gradients accumulate without retaining all graphs at once.

        Args:
            model: the model to run (also the gradient target).
            src: input features, ``(N, T, input_chn)``.
            src_padding_mask: True where a timestep is padding, ``(N, T)``.
            label: target class indices, ``(N, T)``.
            label_weight: per-timestep loss weights, ``(N, T)``.
            scale: multiplier applied to each prefix loss.

        Returns:
            ``(total_loss, total_weight)`` as Python floats; divide the
            first by the second to obtain the weighted mean loss.
        """
        total_loss = 0.0
        total_weight = 0.0
        for end_idx in range(1, src.size(1) + 1):
            prefix_src = src[:, :end_idx]
            prefix_mask = src_padding_mask[:, :end_idx]
            prefix_label = label[:, :end_idx]
            prefix_weight = label_weight[:, :end_idx]

            probs = model.forward(prefix_src, prefix_mask)  # (N, T, C)
            # NOTE(review): log of a softmax output can underflow to -inf for
            # near-zero probabilities; consider a log_softmax head if NaNs
            # ever appear in training.
            log_probs = torch.log(probs)

            # nll_loss expects the class dim at position 1: (N, C, T).
            per_step = F.nll_loss(
                torch.moveaxis(log_probs, -1, 1),
                prefix_label,
                reduction="none",
            )  # (N, T); the original moved the axis back afterwards, but that
            # result was never used — dead code removed.
            loss = torch.sum(per_step * prefix_weight) * scale

            total_loss += loss.item()
            total_weight += torch.sum(prefix_weight).item()

            if model.training:
                loss.backward()

        return float(total_loss), float(total_weight)

    @staticmethod
    def update_evaluator(
        model: CustomizedClassificationTransformerEncoder,
        src,
        src_padding_mask,
        label,
        label_weight,
        evaluator: TimeSequenceClassificationEvaluator,
    ):
        """Feed argmax predictions on each sequence prefix into *evaluator*.

        Each retained prediction is tagged with its prefix length as the
        time stamp. Timesteps with non-positive ``label_weight`` are skipped.
        """
        # Prefixes shorter than 4 are skipped — presumably too little
        # temporal context for a meaningful prediction; confirm with the
        # evaluation protocol.
        for end_idx in range(4, src.size(1) + 1):
            prefix_src = src[:, :end_idx]
            prefix_mask = src_padding_mask[:, :end_idx]
            prefix_label = label[:, :end_idx]
            prefix_weight = label_weight[:, :end_idx]

            probs = model.forward(prefix_src, prefix_mask)  # (N, T, C)
            preds = torch.argmax(probs, dim=-1)  # (N, T)
            time_stamp = torch.zeros_like(preds, dtype=torch.int32) + end_idx

            keep = prefix_weight > 0
            if torch.sum(keep) == 0:
                continue  # nothing labeled in this prefix
            evaluator.add(
                preds[keep].reshape(-1),
                prefix_label[keep].reshape(-1),
                time_stamp[keep].reshape(-1),
            )


class CustomizedClassificationTransformerDecoder(BaseClassificationModel):
    """Causal transformer classifier.

    Produces per-timestep class probabilities of shape ``(N, T, C)``.
    The causal attention mask already prevents each timestep from seeing the
    future, so loss and evaluation run in a single full-sequence pass (unlike
    the encoder variant, which must iterate over prefixes).
    """

    def __init__(
        self,
        input_chn,
        output_chn,
        d_model=512,
        num_layers=8,
        n_heads=8,
        batch_first=True,
    ) -> None:
        """
        Args:
            input_chn: number of input feature channels per timestep.
            output_chn: number of output classes (C).
            d_model: transformer hidden size.
            num_layers: number of transformer layers.
            n_heads: number of attention heads.
            batch_first: whether tensors are laid out ``(N, T, ...)``.
        """
        super().__init__()

        self.backbone = CustomizedTransformer(
            input_chn,
            output_chn,
            d_model,
            num_layers,
            n_heads,
            is_causal=True,  # decoder: causal (left-to-right) attention
            batch_first=batch_first,
        )
        self.softmax = nn.Softmax(dim=2)  # normalize over classes: (N, T, C)

    def forward(self, src, src_padding_mask):
        """Return per-timestep class probabilities of shape ``(N, T, C)``."""
        logits = self.backbone.forward(src, src_padding_mask)
        return self.softmax(logits)

    @staticmethod
    def calculate_loss(
        model: CustomizedClassificationTransformerDecoder,
        src: torch.FloatTensor,
        src_padding_mask: torch.BoolTensor,
        label: torch.LongTensor,
        label_weight: torch.FloatTensor,
        scale: float = 1,
    ):
        """Weighted NLL loss over the full sequence in one pass.

        When ``model.training`` is True, ``backward()`` is called here.

        Args:
            model: the model to run (also the gradient target).
            src: input features, ``(N, T, input_chn)``.
            src_padding_mask: True where a timestep is padding, ``(N, T)``.
            label: target class indices, ``(N, T)``.
            label_weight: per-timestep loss weights, ``(N, T)``.
            scale: multiplier applied to the summed loss.

        Returns:
            ``(total_loss, total_weight)`` as Python floats; divide the
            first by the second to obtain the weighted mean loss.
        """
        probs = model.forward(src, src_padding_mask)  # (N, T, C)
        # NOTE(review): log of a softmax output can underflow to -inf for
        # near-zero probabilities; consider a log_softmax head if NaNs
        # ever appear in training.
        log_probs = torch.log(probs)
        # nll_loss expects the class dim at position 1: (N, C, T).
        per_step = F.nll_loss(
            torch.moveaxis(log_probs, -1, 1),
            label,
            reduction="none",
        )  # (N, T); the original moved the axis back afterwards, but that
        # result was never used — dead code removed.
        loss = torch.sum(per_step * label_weight) * scale

        if model.training:
            loss.backward()

        return float(loss.item()), float(torch.sum(label_weight).item())

    @staticmethod
    def update_evaluator(
        model: CustomizedClassificationTransformerDecoder,
        src,
        src_padding_mask,
        label: torch.LongTensor,
        label_weight: torch.FloatTensor,
        evaluator: TimeSequenceClassificationEvaluator,
    ):
        """Feed full-sequence argmax predictions into *evaluator*.

        Each retained prediction is tagged with its timestep index as the
        time stamp. Timesteps with non-positive ``label_weight`` are skipped.
        """
        probs = model.forward(src, src_padding_mask)  # (N, T, C)
        preds = torch.argmax(probs, dim=-1)  # (N, T)
        _, seq_len = preds.shape
        time_stamp = (
            torch.arange(0, seq_len, 1).expand_as(preds).to(preds.device)
        )

        keep = label_weight > 0
        evaluator.add(
            preds[keep].reshape(-1),
            label[keep].reshape(-1),
            time_stamp[keep].reshape(-1),
        )


if __name__ == "__main__":
    # Smoke test: build the encoder variant, run a forward pass and one
    # training-mode loss computation on random data. (Swap in
    # CustomizedClassificationTransformerDecoder to exercise the causal
    # variant instead.)
    input_chn = 10
    output_chn = 3
    d_model = 512
    batch_size = 7
    time_len = 17
    test_model = CustomizedClassificationTransformerEncoder(
        input_chn, output_chn, d_model, 8, 8
    )
    test_input = torch.randn(batch_size, time_len, input_chn)
    test_input_mask = torch.randn((batch_size, time_len)) > 0
    # BUG FIX: torch.randint's upper bound is exclusive, so the original
    # `output_chn - 1` could never sample the last class.
    test_label = torch.randint(0, output_chn, (batch_size, time_len))
    test_label_weight = torch.rand((batch_size, time_len))

    # BUG FIX: the original hard-coded `.to(1)` (CUDA device index 1), which
    # crashes on machines without at least two GPUs; fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_model = test_model.to(device)
    test_input = test_input.to(device)
    test_input_mask = test_input_mask.to(device)
    test_label = test_label.to(device)
    test_label_weight = test_label_weight.to(device)

    test_model.train()
    test_output = test_model.forward(test_input, test_input_mask)
    total_loss, total_weight = test_model.calculate_loss(
        test_model, test_input, test_input_mask, test_label, test_label_weight
    )

    # Report results instead of the original bare print().
    print(f"output shape: {tuple(test_output.shape)}")
    print(f"total_loss={total_loss:.4f}, total_weight={total_weight:.4f}")
