""" Full assembly of the parts to form the complete network """

import sys
import os

cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)
sys.path.append(os.path.join(cur_path, ".."))

from UNet.unet_parts import *
import torch
import numpy as np
import torch.nn as nn
from timm.models.layers import trunc_normal_
from NN.UNet.attention_unet import UNet3DWithSamplePoints
from NN.GNN.FiniteVolumeGN.EPDbackbone import EncoderProcesserDecoder
from torch_geometric.nn.pool import global_add_pool, global_mean_pool
from torch_scatter import scatter
from torch_geometric.nn import GlobalAttention
from TransolverDecoder import TransolverDecoder


class AttentionBlockForCd(nn.Module):
    """Cross-attention pooling head that regresses a scalar (e.g. drag coefficient).

    A learnable CLS token attends over block-pooled grid (UNet) features and
    block-pooled graph (GNN) features; the attended CLS embedding is then
    projected to ``outdim`` values per graph in the batch.
    """

    def __init__(self, n_hidden, outdim=1):
        super(AttentionBlockForCd, self).__init__()
        self.n_hidden = n_hidden
        # Learnable CLS token; expanded across the batch at forward time.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, n_hidden))
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=n_hidden, num_heads=1, dropout=0
        )
        # Final projection from the attended CLS embedding to the output.
        self.fc = nn.Linear(n_hidden, outdim)

    def forward(self, graph_feature, unsampled_u_feature, graph_node):
        num_graphs = graph_node.num_graphs
        # One CLS token per graph: [1, batch, hidden].
        cls = self.cls_token.expand(-1, num_graphs, -1)

        feat_dim = graph_feature.size(-1)
        block_ids = graph_node.graph_block_id
        out_buffer = torch.zeros(
            (block_ids.max().item() + 1, feat_dim),
            device=graph_feature.device,
        )
        # Mean-pool graph-node features per block, then lay out as
        # [seq_len, batch, hidden] as expected by nn.MultiheadAttention.
        pooled_graph = (
            scatter(
                src=graph_feature,
                index=block_ids,
                dim=0,
                reduce="mean",
                out=out_buffer,
            )
            .reshape(num_graphs, -1, feat_dim)
            .permute(1, 0, 2)
        )

        # Mean-pool grid (UNet) features per block; same layout.
        pooled_grid = scatter(
            src=unsampled_u_feature,
            index=graph_node.grid_block_id[0],
            dim=1,
            reduce="mean",
        ).permute(1, 0, 2)

        # Cross attention: queries are [CLS | grid | graph]; keys/values are
        # [grid | graph] only (the CLS token never attends to itself).
        keys = torch.cat((pooled_grid, pooled_graph), dim=0)
        queries = torch.cat((cls, pooled_grid, pooled_graph), dim=0)
        attended, _ = self.cross_attention(queries, keys, keys)

        # Keep only the attended CLS position: [batch, hidden].
        cls_out = attended[0]

        # Regress the prediction: [batch, outdim].
        return self.fc(cls_out)


class Model(nn.Module):
    """Hybrid voxel-UNet + finite-volume-GNN surrogate model.

    Fuses point-sampled features from a 3D UNet branch with node features
    from a graph-network branch, then decodes them into either node-wise
    fields (velocity, pressure, position) or a per-graph scalar (Cd),
    depending on the requested ``target``.

    Note: most constructor arguments (``space_dim``, ``n_layers``, ...) are
    kept for interface compatibility but are currently unused; ``params``
    carries the GNN configuration and must not be None.
    """

    def __init__(
        self,
        space_dim=1,
        n_layers=5,
        n_hidden=256,
        dropout=0,
        n_head=8,
        act="gelu",
        mlp_ratio=1,
        fun_dim=1,
        out_dim=1,
        slice_num=32,
        ref=8,
        unified_pos=False,
        params=None,
    ):
        super(Model, self).__init__()

        self.n_hidden = n_hidden
        # Voxel-grid branch: 3D UNet that also samples features at
        # arbitrary query points.
        self.unet = UNet3DWithSamplePoints(
            in_channels=1,
            out_channels=self.n_hidden,
            hidden_channels=self.n_hidden,
            num_levels=4,
        )

        # Mesh/graph branch: encoder-processor-decoder GNN.
        self.fvgn = EncoderProcesserDecoder(
            message_passing_num=params.message_passing_num,
            edge_input_size=params.edge_input_size,
            node_input_size=params.node_input_size,
            node_output_size=params.node_output_size,
            hidden_size=n_hidden,
            params=params,
        )

        # Node-wise MLP decoding the concatenated (UNet || GNN) features
        # into node_output_size field channels plus n_hidden extra channels.
        self.decoder = nn.Sequential(
            nn.Linear(n_hidden * 2, n_hidden * 4),
            nn.GELU(),
            nn.Linear(n_hidden * 4, params.node_output_size + n_hidden),
        )

        # Attention-based pooling head for the scalar Cd target.
        self.cd_decoder = AttentionBlockForCd(n_hidden, outdim=1)

        self.initialize_weights()

    def initialize_weights(self):
        """Apply truncated-normal / constant initialization to all submodules."""
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Per-module init hook used by ``nn.Module.apply``."""
        if isinstance(m, nn.Linear):
            # Linear layers: truncated-normal weights, zero bias.
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)):
            # Norm layers: identity scale, zero shift.
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def train_vel_press(
        self,
        graph_node=None,
        query_list=None,
        params=None,
    ):
        """Decode node-wise velocity, pressure and position predictions.

        Returns:
            tuple: (pred_vel [N, 3], pred_press [N, 1], pred_pos [N, 3]).
        """
        # Point-sampled UNet features at the query locations.
        ufeatures, _ = self.unet(
            graph_node.voxel,
            query_list,
            half=False,
        )

        graph_feature = self.fvgn(
            graph_node=graph_node,
            params=params,
        )
        # Fuse both branches node-wise and decode.
        fx_node_wise = torch.cat((ufeatures, graph_feature), dim=-1)
        decoder_fx = self.decoder(fx_node_wise)

        # Channel layout: [0:3] velocity, [3:4] pressure, [4:7] position.
        pred_vel = decoder_fx[:, 0:3]
        pred_press = decoder_fx[:, 3:4]
        pred_pos = decoder_fx[:, 4:7]

        return pred_vel, pred_press, pred_pos

    def train_cd(
        self,
        graph_node=None,
        query_list=None,
        params=None,
    ):
        """Predict the per-graph drag coefficient.

        Returns:
            tuple: (None, None, pred_cd [batch, 1]) — padded to match the
            three-tuple shape of ``train_vel_press``.
        """
        # Unsampled (dense) UNet feature volume feeds the Cd head.
        _, unsampled_u_feature = self.unet(
            graph_node.voxel,
            query_list,
            half=False,
        )

        graph_feature = self.fvgn(
            graph_node=graph_node,
            params=params,
        )
        pred_cd = self.cd_decoder(graph_feature, unsampled_u_feature, graph_node)

        return None, None, pred_cd

    def forward(
        self,
        graph_node=None,
        params=None,
        target="vel_press",
    ):
        """Dispatch to the field decoder or the Cd decoder.

        Args:
            graph_node: batched graph carrying ``voxel``, ``query``, ``batch``.
            params: GNN configuration forwarded to the FVGN branch.
            target: "vel_press" for (vel, press, pos) or "cd" for
                (None, None, cd).

        Raises:
            ValueError: if ``target`` is neither "vel_press" nor "cd".
        """
        # Build one query tensor per graph in the batch, reordered to
        # (z, x, y) and reshaped to the 5-D layout the UNet sampler expects.
        query_list = []
        for i in range(graph_node.num_graphs):
            mask = i == graph_node.batch
            cur_query = graph_node.query[mask]
            cur_query = cur_query[:, [2, 0, 1]][None,]  # [1, N, 3]
            cur_query = cur_query.unsqueeze(2).unsqueeze(2)  # [1, N, 1, 1, 3]
            query_list.append(cur_query)

        if "vel_press" == target:
            pred_vel, pred_press, pred_pos = self.train_vel_press(
                graph_node=graph_node, query_list=query_list, params=params
            )
            return pred_vel, pred_press, pred_pos

        elif "cd" == target:
            _, _, pred_cd = self.train_cd(
                graph_node=graph_node, query_list=query_list, params=params
            )
            return None, None, pred_cd

        else:
            raise ValueError("target should be vel_press or cd")


