import torch
import torch.nn as nn
from typing import Tuple, Union
from analytics.crop_recognition.models.base_model import (
    BaseClassificationModel,
)

import torch.nn.functional as F


class LSTM_crop_model(BaseClassificationModel):
    def __init__(
        self,
        input_size=10,
        hidden_size=512,
        num_layers=2,
        output_size=2,
        bidirectional=False,
    ):
        """
        LSTM classifier for time-sequence data.

        Combines a PyTorch LSTM backbone with a single linear head to
        produce a per-timestep class probability distribution.

        Parameters
        ----------
        input_size : int, optional
            Number of input channels per timestep, by default 10,
            which is the length of ASSET_NAMES.
        hidden_size : int, optional
            Hidden size of the LSTM (see torch.nn.LSTM), by default 512.
        num_layers : int, optional
            Number of stacked LSTM layers (see torch.nn.LSTM), by default 2.
        output_size : int, optional
            Number of output classes, by default 2.
        bidirectional : bool, optional
            Whether the LSTM is bidirectional (see torch.nn.LSTM),
            by default False.
        """

        super().__init__()
        # Record constructor arguments so the model config can be
        # re-created from a checkpoint. locals() here also contains
        # `self` and (because super() is used) `__class__`; drop both
        # so only real hyper-parameters remain.
        self.arguments = locals()
        self.arguments.pop("self")
        self.arguments.pop("__class__")
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.backbone = nn.LSTM(
            self.input_size,
            self.hidden_size,
            self.num_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )
        # A bidirectional LSTM concatenates forward/backward states,
        # doubling the feature size seen by the linear head.
        bidirectional_factor = 2 if bidirectional else 1
        self.head = nn.Linear(bidirectional_factor * self.hidden_size, self.output_size)

        self.soft_max = nn.Softmax(dim=2)

    def forward(self, src: torch.FloatTensor, src_padding_mask: torch.BoolTensor):
        """
        Run the model on a batch of sequences.

        Parameters
        ----------
        src : torch.FloatTensor
            Input tensor of shape (N, T, C_in).
        src_padding_mask : torch.BoolTensor
            Mask of shape (N, T); False marks padded timesteps.

        Returns
        -------
        torch.FloatTensor
            Per-timestep class probabilities of shape (N, T, C_out).
        """

        # Zero out padded timesteps. masked_fill (instead of the previous
        # in-place indexed assignment) avoids mutating the caller's tensor
        # and keeps autograd working when src requires grad.
        src = src.masked_fill(~src_padding_mask.unsqueeze(-1), 0.0)
        output, _ = self.backbone(src)  # output (N, T, D * H)
        pred = self.head(output)  # pred (N, T, C)

        result = self.soft_max(pred)
        return result

    @staticmethod
    def loss_calculation(
        model: BaseClassificationModel,
        src: torch.FloatTensor,
        src_padding_mask: torch.BoolTensor,
        label: torch.LongTensor,
        label_weight: torch.FloatTensor,
        scale: float,
    ):
        """
        Compute the weighted NLL loss for one batch and, in training
        mode, backpropagate it.

        Parameters
        ----------
        model : BaseClassificationModel
            Model whose forward returns (N, T, C) probabilities.
        src, src_padding_mask
            Inputs forwarded to the model (see ``forward``).
        label : torch.LongTensor
            Ground-truth class indices, shape (N, T).
        label_weight : torch.FloatTensor
            Per-timestep loss weights, shape (N, T).
        scale : float
            Global multiplier applied to the summed loss.

        Returns
        -------
        tuple of (float, float)
            The scalar loss value and the total label weight.
        """
        output = model.forward(src, src_padding_mask)  # (N, T, C)
        # Clamp before log: a saturated softmax output of exactly 0
        # would otherwise produce -inf and poison the gradients.
        log_prob = torch.log(output.clamp_min(1e-12))
        # nll_loss expects class scores on dim 1, i.e. (N, C, T) against
        # a (N, T) target, so permute first. Bug fix: the previous
        # `reduce="none"` used a deprecated *bool* parameter — a truthy
        # string silently enabled reduction, defeating the per-element
        # weighting below. The correct keyword is `reduction="none"`.
        loss = F.nll_loss(log_prob.permute(0, 2, 1), label, reduction="none")
        loss = torch.sum(loss * label_weight) * scale

        if model.training:
            loss.backward()

        return float(loss.item()), float(torch.sum(label_weight))

    @staticmethod
    def update_evaluator(model, src, src_padding_mask, label, label_weight, evaluator):
        """
        Run inference on one batch and feed the (prediction, label,
        timestep) triples for all valid positions into the evaluator.

        Positions with label_weight <= 0.5 are treated as padding and
        excluded.
        """
        output = model.forward(src, src_padding_mask)  # (N, T, C)
        output = torch.argmax(output, dim=-1)  # (N, T)
        _, T = output.shape
        time_stamp = torch.arange(0, T, 1).expand_as(output)
        label_mask = label_weight > 0.5
        output = output[label_mask].reshape(-1)
        time_stamp = time_stamp[label_mask].reshape(-1)
        # Bug fix: label must be masked the same way, otherwise the three
        # tensors handed to the evaluator are misaligned in length/order.
        label = label[label_mask].reshape(-1)

        evaluator.add(output, label, time_stamp)

if __name__ == "__main__":
    import datetime
    import os
    from analytics.crop_identifier_system.crop_sys_common.ChannelOrder import (
        ASSET_NAMES,
    )
    from analytics.crop_recognition_training_inference.models.model_saving_obj import (
        ModelSavingObject,
    )

    # Smoke test: build a model, wrap its parameter dict in a saving
    # object, and serialize it to the shared test folder.
    save_dir = "/NAS6/Members/chendu/temp_folder/crop_inference"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    lstm_model = LSTM_crop_model(10, 512, 2, 3)

    parameter_dict = {
        "crop_index": ["negative", "soybean", "corn"],
        "model": lstm_model.get_model_parameter_dict(),
        "data": {
            "start_datetime": datetime.datetime(2020, 3, 1),
            "end_datetime": datetime.datetime(2020, 10, 1),
            "delta_date": 8,
            "asset_names": ASSET_NAMES,
        },
    }

    saving_obj = ModelSavingObject(
        parameter_dict["crop_index"],
        parameter_dict["model"],
        parameter_dict["data"],
    )
    print("all done")
    torch.save(saving_obj, os.path.join(save_dir, "test_model_para.pth"))
