from .EPDbackbone import EncoderProcesserDecoder
import torch.nn as nn
import torch
from torch_geometric.data import Data
from utils.normalization import Normalizer
from timm.models.layers import trunc_normal_


class FVGN(nn.Module):
    """Finite-volume graph network simulator.

    Wraps an encoder-processer-decoder GNN backbone together with running
    input normalizers for node and edge features, and provides checkpoint
    save/load helpers that bundle optimizer/scheduler state with the model.
    """

    def __init__(
        self,
        message_passing_num,
        node_input_size,
        edge_input_size,
        node_output_size,
        hidden_size,
        normlizer_steps,
        device,
        model_dir=None,
    ) -> None:
        """Build the backbone and the node/edge normalizers on ``device``.

        Args:
            message_passing_num: number of message-passing steps in the backbone.
            node_input_size: node feature width (also the node normalizer size).
            edge_input_size: edge feature width (also the edge normalizer size).
            node_output_size: per-node output feature width.
            hidden_size: hidden width of the backbone.
            normlizer_steps: max accumulation steps for the running normalizers.
            device: torch device the backbone and normalizers are placed on.
            model_dir: default checkpoint path used by load/save when no
                explicit path is given.
        """
        super().__init__()
        self._device = device
        self.node_input_size = node_input_size
        self.edge_input_size = edge_input_size
        self.model_dir = model_dir

        self.model = EncoderProcesserDecoder(
            message_passing_num=message_passing_num,
            edge_input_size=edge_input_size,
            node_input_size=node_input_size,
            node_output_size=node_output_size,
            hidden_size=hidden_size,
        ).to(device)

        # Running input normalizers; statistics accumulate only when the
        # update_*_attr helpers are called with is_training=True.
        # NOTE(review): assumes Normalizer updates its statistics when
        # invoked with accumulate=True — confirm against utils.normalization.
        self.node_normlizer = Normalizer(
            size=node_input_size,
            max_accumulations=normlizer_steps,
            epsilon=1e-8,
            device=device,
        )
        self.edge_normlizer = Normalizer(
            size=edge_input_size,
            max_accumulations=normlizer_steps,
            epsilon=1e-8,
            device=device,
        )

        self.initialize_weights()

        print("Simulator model initialized")

    def initialize_weights(self):
        """Recursively apply ``_init_weights`` to every submodule."""
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Init hook: trunc-normal Linear weights, zero biases, unit norm scales.

        The redundant inner ``isinstance(m, nn.Linear)`` re-check from the
        original (already guaranteed by the enclosing branch) was dropped.
        """
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def update_node_attr(
        self,
        node_attr,
        is_training=False,
    ):
        """Normalize node features; accumulate statistics while training."""
        return self.node_normlizer(node_attr, accumulate=is_training)

    def update_edge_attr(
        self,
        edge_attr,
        is_training=False,
    ):
        """Normalize edge features; accumulate statistics while training."""
        return self.edge_normlizer(edge_attr, accumulate=is_training)

    def forward(
        self,
        graph_cell: Data = None,
        graph_edge: Data = None,
        graph_node: Data = None,
        params=None,
        is_training=True,
    ):
        """Normalize ``graph_node`` features, then run the backbone.

        Note: ``graph_node.x`` and ``graph_node.edge_attr`` are overwritten
        in place with their normalized values.

        Returns:
            The backbone's predicted per-node output (named ``uvp`` by the
            original author — presumably velocity/pressure; verify upstream).
        """
        graph_node.x = self.update_node_attr(
            node_attr=graph_node.x,
            is_training=is_training,
        )

        graph_node.edge_attr = self.update_edge_attr(
            edge_attr=graph_node.edge_attr,
            is_training=is_training,
        )

        return self.model(graph_node, graph_edge, graph_cell, params=params)

    def load_checkpoint(
        self, optimizer=None, scheduler=None, ckpdir=None, device=None, is_training=True
    ):
        """Restore model state (and optionally optimizer/scheduler state).

        Args:
            optimizer: a single optimizer or a list of them; each is restored
                from the checkpoint's ``"optimizer{i}"`` entry.
            scheduler: a single scheduler or a list of them; each is restored
                from the checkpoint's ``"scheduler{i}"`` entry.
            ckpdir: checkpoint path; falls back to ``self.model_dir``.
            device: ``map_location`` forwarded to ``torch.load``.
            is_training: retained for interface compatibility; unused (the
                original only used it for dead key-pruning bookkeeping).

        Raises:
            KeyError: if an expected ``optimizer{i}``/``scheduler{i}`` entry
                is missing from the checkpoint.
        """
        if ckpdir is None:
            ckpdir = self.model_dir
        dicts = torch.load(ckpdir, map_location=device)
        self.load_state_dict(dicts["model"])

        if optimizer is not None:
            if not isinstance(optimizer, list):
                optimizer = [optimizer]
            for i, o in enumerate(optimizer):
                o.load_state_dict(dicts["optimizer{}".format(i)])

        if scheduler is not None:
            if not isinstance(scheduler, list):
                scheduler = [scheduler]
            for i, s in enumerate(scheduler):
                s.load_state_dict(dicts["scheduler{}".format(i)])

        # The original tracked the checkpoint's leftover keys and pruned
        # optimizer/scheduler entries in eval mode, but never used the
        # result — that dead bookkeeping has been removed.
        print("Simulator model and optimizer/scheduler loaded checkpoint %s" % ckpdir)

    def save_checkpoint(self, path=None, optimizer=None, scheduler=None):
        """Save model (and optional optimizer/scheduler) state to ``path``.

        BUGFIX: the original unconditionally wrapped ``optimizer``/``scheduler``
        into a list, so the ``None`` defaults became ``[None]`` and crashed on
        ``None.state_dict()``. ``None`` values are now skipped, making a
        model-only save (the default call) work.

        Args:
            path: destination for ``torch.save``; falls back to ``self.model_dir``.
            optimizer: a single optimizer or a list; saved as ``"optimizer{i}"``.
            scheduler: a single scheduler or a list; saved as ``"scheduler{i}"``.
        """
        if path is None:
            path = self.model_dir

        to_save = {"model": self.state_dict()}

        if optimizer is not None:
            if not isinstance(optimizer, list):
                optimizer = [optimizer]
            for i, o in enumerate(optimizer):
                to_save["optimizer{}".format(i)] = o.state_dict()

        if scheduler is not None:
            if not isinstance(scheduler, list):
                scheduler = [scheduler]
            for i, s in enumerate(scheduler):
                to_save["scheduler{}".format(i)] = s.state_dict()

        torch.save(to_save, path)
        print("Simulator model saved at %s" % path)
