""" Full assembly of the parts to form the complete network """

import sys
import os

cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)
sys.path.append(os.path.join(cur_path, ".."))
from UNet.unet_parts import *
from Model import Physics_Attention_1D, MLP
import torch
import numpy as np
import torch.nn as nn
from timm.models.layers import trunc_normal_
from einops import rearrange, repeat
from torch_scatter import scatter_softmax, scatter
from NN.UNet.Unet_GINO import UNet3DWithSamplePoints


class Transolver_block(nn.Module):
    """Transformer encoder block: physics attention followed by an MLP,
    each branch using pre-LayerNorm and a residual connection. When
    ``last_layer`` is set, a final LayerNorm + Linear head projects the
    hidden features to ``out_dim``.
    """

    def __init__(
        self,
        num_heads: int,
        hidden_dim: int,
        dropout: float,
        act="gelu",
        mlp_ratio=4,
        last_layer=False,
        out_dim=1,
        slice_num=32,
    ):
        super().__init__()
        self.last_layer = last_layer

        # Pre-norm attention branch.
        self.ln_1 = nn.LayerNorm(hidden_dim)
        self.Attn = Physics_Attention_1D(
            hidden_dim,
            heads=num_heads,
            dim_head=hidden_dim // num_heads,
            dropout=dropout,
            slice_num=slice_num,
        )

        # Pre-norm feed-forward branch.
        self.ln_2 = nn.LayerNorm(hidden_dim)
        self.mlp = MLP(
            hidden_dim,
            hidden_dim * mlp_ratio,
            hidden_dim,
            n_layers=0,
            res=False,
            act=act,
        )

        # Optional output head, built only for the final block of the stack.
        if last_layer:
            self.ln_3 = nn.LayerNorm(hidden_dim)
            self.mlp2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, fx):
        """Run both residual branches; project to ``out_dim`` if this is the
        last block, otherwise return the hidden features unchanged in shape."""
        attn_out = self.Attn(self.ln_1(fx))
        fx = attn_out + fx
        mlp_out = self.mlp(self.ln_2(fx))
        fx = mlp_out + fx
        if not self.last_layer:
            return fx
        return self.mlp2(self.ln_3(fx))


# mt = Multiscale_Transolver_block(hidden_dim=32, dropout=0, num_heads=8)
# rst = mt(torch.randn([1, 3148, 32]))
# print(rst.shape)


class Model(nn.Module):
    """UniPDE_3D: predicts a scalar field per mesh node.

    The current live path feeds the voxelized geometry through a 3D UNet
    (``UNet3DWithSamplePoints``), samples UNet features at the query points,
    and maps them to ``out_dim`` with a small MLP head. The Transolver
    branch (``preprocess`` + ``blocks`` + ``placeholder``) is constructed so
    existing checkpoints keep loading, but it is currently bypassed in
    ``forward``.
    """

    def __init__(
        self,
        space_dim=1,
        n_layers=5,
        n_hidden=256,
        dropout=0,
        n_head=8,
        act="gelu",
        mlp_ratio=1,
        fun_dim=1,
        out_dim=1,
        slice_num=32,
        ref=8,
        unified_pos=False,
        n_points=3586,
    ):
        """Build the network.

        Args:
            space_dim: dimensionality of node coordinates.
            n_layers: number of Transolver blocks.
            n_hidden: hidden width of the Transolver branch.
            dropout: attention dropout.
            n_head: attention heads per block.
            act: activation name for the MLPs.
            mlp_ratio: feed-forward expansion ratio inside each block.
            fun_dim: dimensionality of the input function values.
            out_dim: output channels per node.
            slice_num: slice count for the physics attention.
            ref: per-axis resolution of the unified positional grid.
            unified_pos: use the ref^3 positional encoding instead of raw pos.
            n_points: number of mesh nodes per sample; used to infer the
                batch size from the flattened input (default 3586 matches
                the previous hard-coded value).
        """
        super().__init__()
        self.__name__ = "UniPDE_3D"
        self.ref = ref
        self.unified_pos = unified_pos
        self.n_points = n_points
        if self.unified_pos:
            # Input = function values + flattened ref^3 positional grid.
            self.preprocess = MLP(
                fun_dim + self.ref * self.ref * self.ref,
                n_hidden * 2,
                n_hidden,
                n_layers=0,
                res=False,
                act=act,
            )
        else:
            # Input = function values + raw spatial coordinates.
            self.preprocess = MLP(
                fun_dim + space_dim,
                n_hidden * 2,
                n_hidden,
                n_layers=0,
                res=False,
                act=act,
            )

        self.n_hidden = n_hidden
        self.space_dim = space_dim

        # Transolver encoder stack (currently bypassed in forward, kept for
        # checkpoint compatibility).
        self.blocks = nn.ModuleList(
            [
                Transolver_block(
                    num_heads=n_head,
                    hidden_dim=n_hidden,
                    dropout=dropout,
                    act=act,
                    mlp_ratio=mlp_ratio,
                    out_dim=out_dim,
                    slice_num=slice_num,
                    last_layer=False,
                )
                for _ in range(n_layers)
            ]
        )

        # 3D UNet over the voxel grid, sampled at the query points.
        self.unet_o_dim = n_hidden
        self.unet = UNet3DWithSamplePoints(1, self.unet_o_dim, self.unet_o_dim, 4)

        # Output head mapping per-point UNet features to out_dim.
        self.last_layer = nn.Sequential(
            nn.LayerNorm(self.unet_o_dim),
            nn.Linear(self.unet_o_dim, n_hidden * 2),
            nn.GELU(),
            nn.LayerNorm(n_hidden * 2),
            nn.Linear(n_hidden * 2, out_dim),
        )
        self.initialize_weights()

        # Learned bias added when no function values are given (Transolver
        # branch only); kept so existing state_dicts still load.
        self.placeholder = nn.Parameter(
            (1 / (n_hidden)) * torch.rand(n_hidden, dtype=torch.float)
        )

    def initialize_weights(self):
        """Apply truncated-normal / constant initialization to all submodules."""
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Init one submodule: trunc-normal Linear weights, zero biases,
        unit LayerNorm/BatchNorm scales."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, graph_node, params=None):
        """Predict one scalar per node.

        Args:
            x: flattened node features of shape (batch * n_points, ...);
               only used to infer the batch size.
            graph_node: object providing ``voxel`` (voxelized geometry) and
               ``query`` (sample-point coordinates) for the UNet.
            params: unused; kept for interface compatibility.

        Returns:
            Tensor of shape (batch * n_points, 1).
        """
        batch_size = x.shape[0] // self.n_points
        # NOTE(review): the Transolver branch (preprocess -> blocks) is
        # intentionally disabled; only the UNet features drive the output.
        # Third argument enables the UNet's half-precision path.
        ufeatures = self.unet.forward(graph_node.voxel, graph_node.query, True)
        fx = self.last_layer(ufeatures)
        return fx.reshape(batch_size * self.n_points, 1)
