""" Full assembly of the parts to form the complete network """

import sys
import os

# Make sibling packages (UNet, Model, NN, utils) importable when this file is
# run or imported outside its package root — appends this file's directory and
# its parent to sys.path before the local imports below.
cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)
sys.path.append(os.path.join(cur_path, ".."))
from UNet.unet_parts import *
from Model import Physics_Attention_1D, MLP
import torch
import numpy as np
import torch.nn as nn
from timm.models.layers import trunc_normal_
from einops import rearrange, repeat
from torch_scatter import scatter_softmax, scatter
from NN.UNet.Unet_GINO import UNet3DWithSamplePoints
from NN.GNN.FiniteVolumeGN.EPDbackbone import EncoderProcesserDecoder
from utils.normalization import Normalizer


class Model(nn.Module):
    """Two-branch surrogate model combining a 3D UNet voxel branch with a
    finite-volume graph network (FVGN) branch.

    The UNet branch encodes ``graph_node.voxel`` sampled at ``graph_node.query``
    points; the FVGN branch encodes the mesh graph. Per query point, the two
    feature vectors are concatenated and projected to ``out_dim`` by an MLP.

    Note: several constructor arguments (``n_layers``, ``dropout``, ``n_head``,
    ``act``, ``mlp_ratio``, ``fun_dim``, ``slice_num``, ``ref``,
    ``unified_pos``) are accepted for signature compatibility but are not used
    by this implementation.
    """

    # Number of query points per graph sample.
    # NOTE(review): hard-coded to match the dataset's fixed query-point count —
    # confirm against the data pipeline before changing.
    N_QUERY_POINTS = 3586

    def __init__(
        self,
        space_dim=1,
        n_layers=5,
        n_hidden=256,
        dropout=0,
        n_head=8,
        act="gelu",
        mlp_ratio=1,
        fun_dim=1,
        out_dim=1,
        slice_num=32,
        ref=8,
        unified_pos=False,
        params=None,
    ):
        super().__init__()

        self.n_hidden = n_hidden
        # FVGN branch shares the hidden width with the fusion head.
        self.n_fvgn_hidden = self.n_hidden
        self.space_dim = space_dim
        self.out_dim = out_dim

        # UNet branch: 1 input channel, hidden width n_hidden, depth 4.
        self.unet_o_dim = n_hidden
        self.unet = UNet3DWithSamplePoints(1, self.unet_o_dim, self.unet_o_dim, 4)
        self.uln = nn.LayerNorm(self.unet_o_dim)

        # Fusion head: [unet features ‖ fvgn features] -> out_dim.
        self.last_layer = nn.Sequential(
            nn.Linear(n_hidden + self.n_fvgn_hidden, n_hidden * 4),
            nn.GELU(),
            nn.Linear(n_hidden * 4, out_dim),
        )

        # Graph branch; all sizes come from the (required) params namespace.
        self.fvgn = EncoderProcesserDecoder(
            message_passing_num=params.message_passing_num,
            cell_input_size=params.cell_input_size,
            edge_input_size=params.edge_input_size,
            node_input_size=params.node_input_size,
            cell_output_size=params.cell_output_size,
            edge_output_size=params.edge_output_size,
            node_output_size=params.node_output_size,
            hidden_size=self.n_fvgn_hidden,
        )
        # Running normalizer for edge features; second arg is the number of
        # accumulation steps (one epoch's worth of batches).
        self.edge_norm = Normalizer(
            params.edge_input_size, params.dataset_size // params.batch_size
        )
        self.initialize_weights()

    def initialize_weights(self):
        """Apply the default initialization to every submodule."""
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; zero bias / unit weight
        for LayerNorm and BatchNorm1d."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, graph_node, graph_edge=None, graph_cell=None, params=None):
        """Fuse UNet and FVGN features per query point.

        Args:
            x: unused; kept for caller-signature compatibility.
            graph_node: batched graph carrying ``edge_attr``, ``voxel``,
                ``query`` and ``num_graphs``.
            graph_edge, graph_cell: auxiliary graphs consumed by the FVGN.
            params: unused here; the FVGN is always called with params=None.

        Returns:
            Tensor of shape (batch_size * N_QUERY_POINTS, out_dim).
        """
        # Normalize edge features in place on the graph, then run the FVGN.
        graph_node.edge_attr = self.edge_norm(graph_node.edge_attr)
        graph_feature = self.fvgn(graph_node, graph_edge, graph_cell, params=None)

        batch_size = graph_node.num_graphs
        # Presumably toggles half precision inside the UNet — confirm in
        # UNet3DWithSamplePoints.forward.
        half = True

        ufeatures = self.unet.forward(graph_node.voxel, graph_node.query, half)
        # Flatten to one row per query point across the batch.
        ufeatures = ufeatures.reshape(batch_size * self.N_QUERY_POINTS, -1)
        fx = self.last_layer(torch.cat((self.uln(ufeatures), graph_feature), dim=-1))
        # Generalized from the hard-coded width 1: identical when out_dim == 1,
        # and no longer crashes for out_dim > 1.
        fx = fx.reshape(batch_size * self.N_QUERY_POINTS, self.out_dim)
        return fx
