import os
import sys

cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)

import torch
import numpy as np
import torch.nn as nn
from torch_geometric.data import Data


class FVGN(nn.Module):
    """Wrapper that feeds graph data through the FVGNAttUnet ``Model``.

    When ``params.SAGE_MIXING_TYPE == "Origin_unify"``, each node's input
    features are augmented with its Euclidean distances to a fixed
    ``ref**3`` reference grid (see :meth:`get_grid`), so the backbone's
    input width grows by ``ref**3``.
    """

    def __init__(self, params) -> None:
        super().__init__()

        self.params = params
        self.mixing_type = params.SAGE_MIXING_TYPE
        # Imported lazily so this module can be imported without pulling in
        # the backbone definition at import time.
        from FVGNAttUnet import Model

        # Number of reference points per axis for the positional encoding.
        self.ref = 3

        # "Origin_unify" appends ref**3 grid-distance features per node.
        space_dim = (
            params.node_input_size
            if self.mixing_type != "Origin_unify"
            else params.node_input_size + self.ref**3
        )
        self.nn = Model(
            space_dim=space_dim,
            n_hidden=params.hidden_size,
            n_layers=8,
            fun_dim=0,
            n_head=8,
            mlp_ratio=2,
            out_dim=1,
            slice_num=32,
            unified_pos=0,
            ref=self.ref,
            params=params,
        )

    def forward(
        self,
        graph_cell: "Data" = None,
        graph_edge: "Data" = None,
        graph_node: "Data" = None,
        params=None,
        is_training=True,
    ):
        """Run the backbone on node features (optionally grid-augmented).

        Args:
            graph_cell/graph_edge/graph_node: PyG ``Data`` objects; only
                ``graph_node.x`` and ``graph_node.pos`` are read here, the
                rest is forwarded to the backbone.
            params: forwarded to the backbone.
            is_training: forwarded to the backbone.

        Returns:
            Whatever the wrapped ``Model`` returns.
        """
        x = graph_node.x
        pos = graph_node.pos
        if self.mixing_type == "Origin_unify":
            # get_grid expects a batch dimension; pos here is (N, 3).
            new_pos = self.get_grid(pos[None, :, :])
            new_pos = new_pos.squeeze()
            x = torch.cat([x, new_pos], -1)

        output = self.nn(
            x, graph_node, graph_edge, graph_cell, params, is_training=is_training
        )
        return output

    def load_checkpoint(
        self, optimizer=None, scheduler=None, ckpdir=None, device=None, is_training=True
    ):
        """Restore model (and optionally optimizer/scheduler) state.

        Args:
            optimizer: a single optimizer or a list; states are looked up
                under ``optimizer0``, ``optimizer1``, ... in the checkpoint.
            scheduler: likewise, under ``scheduler0``, ``scheduler1``, ...
            ckpdir: checkpoint path; falls back to ``self.model_dir``.
                NOTE(review): ``model_dir`` is never set in ``__init__`` --
                presumably assigned externally; confirm before relying on
                the fallback.
            device: passed to ``torch.load`` as ``map_location``.
            is_training: kept for interface compatibility; loading itself
                does not depend on it.
        """
        if ckpdir is None:
            ckpdir = self.model_dir
        dicts = torch.load(ckpdir, map_location=device)
        self.load_state_dict(dicts["model"])

        if optimizer is not None:
            if not isinstance(optimizer, list):
                optimizer = [optimizer]
            for i, o in enumerate(optimizer):
                o.load_state_dict(dicts["optimizer{}".format(i)])

        if scheduler is not None:
            if not isinstance(scheduler, list):
                scheduler = [scheduler]
            for i, s in enumerate(scheduler):
                s.load_state_dict(dicts["scheduler{}".format(i)])

        print("Simulator model and optimizer/scheduler loaded checkpoint %s" % ckpdir)

    def save_checkpoint(self, path=None, optimizer=None, scheduler=None):
        """Save model state plus any optimizer/scheduler states to ``path``.

        Args:
            path: target file; falls back to ``self.model_dir``
                (NOTE(review): set externally -- confirm it exists).
            optimizer: a single optimizer, a list of optimizers, or None.
            scheduler: a single scheduler, a list of schedulers, or None.
        """
        if path is None:
            path = self.model_dir

        to_save = {"model": self.state_dict()}

        # Guard the None defaults: previously None was wrapped in a list and
        # ``None.state_dict()`` raised AttributeError.
        if optimizer is not None:
            if not isinstance(optimizer, list):
                optimizer = [optimizer]
            for i, o in enumerate(optimizer):
                to_save["optimizer{}".format(i)] = o.state_dict()

        if scheduler is not None:
            if not isinstance(scheduler, list):
                scheduler = [scheduler]
            for i, s in enumerate(scheduler):
                to_save["scheduler{}".format(i)] = s.state_dict()

        torch.save(to_save, path)
        print("Simulator model saved at %s" % path)

    def get_grid(self, my_pos):
        """Distances from each position to a fixed 3-D reference grid.

        Args:
            my_pos: positions of shape ``(B, N, 3)``.

        Returns:
            Tensor of shape ``(B, N, ref**3)`` where entry ``(b, n, k)`` is
            the Euclidean distance from node ``n`` to reference point ``k``.
            Points enumerate x slowest / z fastest, matching the original
            reshape/repeat layout.
        """
        batchsize, n_nodes = my_pos.shape[0], my_pos.shape[1]
        r = self.ref
        # Build the grid on the input's device instead of hard-coding
        # ``.cuda()`` so this also runs on CPU tensors.
        device = my_pos.device
        gx = torch.linspace(-1.5, 1.5, r, device=device)
        gy = torch.linspace(0.0, 2.0, r, device=device)
        gz = torch.linspace(-4.0, 4.0, r, device=device)
        # cartesian_prod enumerates its first input slowest and its last
        # fastest -- the same ordering the old reshape/repeat produced.
        grid_ref = torch.cartesian_prod(gx, gy, gz).reshape(1, r**3, 3)

        diff = my_pos[:, :, None, :] - grid_ref[:, None, :, :]
        dist = torch.sqrt((diff**2).sum(dim=-1))
        return dist.reshape(batchsize, n_nodes, r**3).contiguous()
