import torch
import torch.nn as nn
import numpy as np
from gns import graph_network
from gns import data_loader
from torch_geometric.nn import radius_graph
from typing import Dict
import trimesh
from scipy.spatial import KDTree
from pysdf import SDF
import rtree


class LearnedSimulator(nn.Module):
  """Learned simulator from https://arxiv.org/pdf/2002.09405.pdf."""

  def __init__(
          self,
          stl_path,
          particle_dimensions: int,
          nnode_in: int,
          nedge_in: int,
          latent_dim: int,
          nmessage_passing_steps: int,
          nmlp_layers: int,
          mlp_hidden_dim: int,
          connectivity_radius: float,
          boundaries: np.ndarray,
          normalization_stats: dict,
          nparticle_types: int,
          particle_type_embedding_size: int,
          boundary_clamp_limit: float = 1.0,
          device="cpu"
  ):
    """Initializes the model.

    Args:
      stl_path: Path to an STL mesh describing the static boundary geometry.
        The mesh is assumed to be in millimeters and is rescaled to meters.
      particle_dimensions: Dimensionality of the problem.
      nnode_in: Number of node inputs.
      nedge_in: Number of edge inputs.
      latent_dim: Size of latent dimension (128)
      nmessage_passing_steps: Number of message passing steps.
      nmlp_layers: Number of hidden layers in the MLP (typically of size 2).
      mlp_hidden_dim: Size of the hidden layers in the MLP.
      connectivity_radius: Scalar with the radius of connectivity.
      boundaries: Array of 2-tuples, containing the lower and upper boundaries
        of the cuboid containing the particles along each dimensions, matching
        the dimensionality of the problem.
      normalization_stats: Dictionary with statistics with keys "acceleration"
        and "velocity"; each entry is a mapping with 'mean' and 'std' keys
        (accessed by subscript elsewhere in this class), matching the
        dimensionality of the problem.
      nparticle_types: Number of different particle types.
      particle_type_embedding_size: Embedding size for the particle type.
      boundary_clamp_limit: a factor to enlarge connectivity radius used for
        computing normalized clipped distance in edge feature.
      device: Runtime device (cuda or cpu).
    """
    # Fix: initialize nn.Module before assigning any attributes, so Module's
    # __setattr__ machinery is fully set up when attributes are stored.
    super().__init__()

    # Static boundary geometry from the STL file; millimeters -> meters.
    self._mesh = trimesh.load(stl_path)
    self._mesh.apply_scale(0.001)

    # Fast signed-distance query structure over the mesh (PySDF).
    self._sdf_fn = SDF(self._mesh.vertices, self._mesh.faces)

    self._boundaries = boundaries
    self._connectivity_radius = connectivity_radius
    self._normalization_stats = normalization_stats
    self._nparticle_types = nparticle_types
    self._boundary_clamp_limit = boundary_clamp_limit

    # Particle type embedding table of shape
    # (nparticle_types, particle_type_embedding_size).
    self._particle_type_embedding = nn.Embedding(
        nparticle_types, particle_type_embedding_size)

    # Learned encode-process-decode GNN core.
    self._encode_process_decode = graph_network.EncodeProcessDecode(
        nnode_in_features=nnode_in,
        nnode_out_features=particle_dimensions,
        nedge_in_features=nedge_in,
        latent_dim=latent_dim,
        nmessage_passing_steps=nmessage_passing_steps,
        nmlp_layers=nmlp_layers,
        mlp_hidden_dim=mlp_hidden_dim)

    self._device = device

  def forward(self):
    """No-op forward pass; inference goes through `predict_positions` /
    `predict_accelerations` instead."""
    return None

  def _compute_stl_features(self, positions):
      """Compute per-particle SDF values and surface normals w.r.t. the STL mesh.

      Best-effort: any failure (or a missing mesh) yields zero defaults.

      Args:
        positions: Particle positions; assumed shape (nparticles, 3) —
          the normals buffer is allocated with 3 components. TODO confirm.

      Returns:
        Tuple (sdf_values, normals):
          sdf_values: (nparticles,) float tensor, forced non-positive below.
          normals: (nparticles, 3) float tensor, negated before return.
      """
      # Default outputs: zero SDF and zero normals.
      sdf_values = torch.zeros(positions.shape[0], device=self._device).float()
      normals = torch.zeros(positions.shape[0], 3, device=self._device).float()

      # No valid mesh available: return the defaults.
      if not hasattr(self, '_mesh') or self._mesh is None:
          return sdf_values, normals

      try:
          positions_np = positions.detach().cpu().numpy().astype(np.float32)

          # Signed distances via the PySDF query structure when available.
          if hasattr(self, '_sdf_fn') and self._sdf_fn is not None:
              sdf_values_np = self._sdf_fn(positions_np)
              sdf_values = torch.tensor(sdf_values_np, device=self._device).float()
          else:
              # Fallback SDF computation.
              sdf_values = self._compute_stl_sdf(positions)

          # Force all distances non-positive. NOTE(review): presumably this
          # encodes an "inside/near boundary" convention expected by the
          # encoder features — confirm against the training data convention.
          sdf_values = - abs(sdf_values)

          # Surface normals via trimesh's nearest-point query.
          normals_np = np.zeros((len(positions_np), 3), dtype=np.float32)

          # Closest surface points and the triangles they lie on.
          closest, distances, triangle_ids = self._mesh.nearest.on_surface(positions_np)

          # Valid triangle ids: use the face normal of the nearest triangle.
          valid_ids = np.where((triangle_ids >= 0) & (triangle_ids < len(self._mesh.faces)))

          if len(valid_ids[0]) > 0:
              # Face normals indexed by the nearest triangle of each point.
              normals_np[valid_ids] = self._mesh.face_normals[triangle_ids[valid_ids]]

          # Invalid triangle ids: approximate the normal with the unit vector
          # from the closest surface point toward the query point.
          invalid_ids = np.where(triangle_ids < 0)
          if len(invalid_ids[0]) > 0:
              direction_vectors = positions_np[invalid_ids] - closest[invalid_ids]
              norms = np.linalg.norm(direction_vectors, axis=1, keepdims=True)
              norms[norms == 0] = 1.0  # avoid division by zero for coincident points
              direction_vectors /= norms
              normals_np[invalid_ids] = direction_vectors

          # Back to torch; negated to match the flipped SDF sign above.
          normals = torch.tensor(normals_np, device=self._device).float()
          normals = - normals

      except Exception as e:
          print(f"Error in _compute_stl_features: {e}")
          # On failure, fall through and return the zero defaults.

      return sdf_values, normals

  def _compute_graph_connectivity(
          self,
          node_features: torch.tensor,
          nparticles_per_example: torch.tensor,
          radius: float,
          add_self_edges: bool = True):
    """Build radius-graph edges between particles within a threshold radius.

    Args:
      node_features: Node features with shape (nparticles, dim).
      nparticles_per_example: Number of particles per example. Default is 2
        examples per batch.
      radius: Threshold to construct edges to all particles within the radius.
      add_self_edges: Boolean flag to include self edge (default: True)

    Returns:
      Tuple (receivers, senders) of edge endpoint index tensors.
    """
    # Tag each particle with the index of the example it belongs to, so
    # edges never cross example boundaries.
    example_ids = [
        example_index
        for example_index, count in enumerate(nparticles_per_example)
        for _ in range(count)
    ]
    batch_ids = torch.LongTensor(example_ids).to(self._device)

    # radius_graph connects pairs with r < radius (strict, not <=);
    # result has shape (2, nedges).
    edge_index = radius_graph(
        node_features, r=radius, batch=batch_ids, loop=add_self_edges,
        max_num_neighbors=128)

    # With the "source_to_target" message-passing flow, row 0 holds the
    # receiving node of each edge and row 1 the sending node.
    receivers, senders = edge_index[0, :], edge_index[1, :]

    return receivers, senders

  def _compute_stl_sdf(self, positions):
      """使用PySDF计算点到STL表面的符号距离"""
      # 如果没有有效的SDF计算器，返回零距离
      if self._sdf_fn is None or self._mesh is None:
          return torch.zeros(positions.shape[0], device=self._device).float()

      # 将张量转换为numpy数组（PySDF需要numpy输入）
      positions_np = positions.detach().cpu().numpy().astype(np.float32)

      # 使用PySDF计算符号距离
      sdf_values = self._sdf_fn(positions_np)

      # 返回PyTorch张量
      return torch.tensor(sdf_values, device=self._device).float()

  def _encoder_preprocessor(
          self,
          position_sequence: torch.tensor,
          nparticles_per_example: torch.tensor,
          particle_types: torch.tensor,
          material_property: torch.tensor):
    """Extracts important features from the position sequence. Returns a tuple
    of node_features (nparticles, 30), edge_index (nparticles, nparticles), and
    edge_features (nparticles, 3).

    Args:
      position_sequence: A sequence of particle positions. Shape is
        (nparticles, 6, dim). Includes current + last 5 positions
      nparticles_per_example: Number of particles per example. Default is 2
        examples per batch.
      particle_types: Particle types with shape (nparticles).
      material_property: Shape is (nparticles, 6, 8)
    """
    nparticles = position_sequence.shape[0]
    #print(f"simulator  position_sequence: {position_sequence}")
    most_recent_position = position_sequence[:, -1]  # (n_nodes, 2)
    velocity_sequence = time_diff(position_sequence)
    #print(f"simulator  velocity_sequence: {velocity_sequence.shape}")

    senders, receivers = self._compute_graph_connectivity(
        most_recent_position, nparticles_per_example, self._connectivity_radius)

    node_features = []

    #most_recent_material_property = material_property[:, -1]
    #print(f"simulator  material_property: {material_property.shape}")
    # material_property = most_recent_material_property.view(nparticles, 8)
    # print(f"simulator  material_property: {material_property.shape}")
    #material_property = material_property.view(nparticles, 8)
    #material_property = material_property.view(nparticles, -1)
    #print(f"simulator  2material_property: {material_property.shape}")

    #node_features.append(material_property)

    # Normalized velocity sequence, merging spatial an time axis.
    velocity_stats = self._normalization_stats["velocity"]
    normalized_velocity_sequence = (
        velocity_sequence - velocity_stats['mean']) / velocity_stats['std']
    #print(f"simulator  normalized_velocity_sequence: {normalized_velocity_sequence.shape}")
    flat_velocity_sequence = normalized_velocity_sequence.view(
        nparticles, -1)
    #print(f"simulator  flat_velocity_sequence: {flat_velocity_sequence.shape}")
    # There are 5 previous steps, with dim 2
    # node_features shape (nparticles, 5 * 2 = 10)
    node_features.append(flat_velocity_sequence)

    # # 提取最近边界的信息（4维特征：sdf_value, normal_x, normal_y, normal_z）
    # sdf_value = material_property[:, 0:1]  # SDF值 [nparticles, 1]
    # boundary_normal = material_property[:, 1:4]  # 法向量 [nparticles, 3]

    # print(f"simulator sdf_value: {sdf_value.shape}")
    # print(f"simulator boundary_normal: {boundary_normal.shape}")
    #
    # # 创建特征向量：归一化法向量 + 距离特征 (共4维特征)
    # normalized_normal = boundary_normal / self._connectivity_radius
    #
    # boundary_features = torch.cat([
    #     torch.clamp(
    #         normalized_normal,
    #         -self._boundary_clamp_limit,
    #         self._boundary_clamp_limit
    #     ),
    #     torch.clamp(
    #         sdf_value,
    #         -self._boundary_clamp_limit,
    #         self._boundary_clamp_limit
    #     )
    # ], dim=-1)  # [nparticles, 4]
    #
    # # 替换原有node_features.append(...)
    # node_features.append(boundary_features)
    #print(f"simulator  material_property: {material_property.shape}")
    # sdf_values_list = []
    # normals_list = []

    geom1_sdf = material_property[..., 3].squeeze()
    geom2_sdf = material_property[..., 7].squeeze()

    # print(f"simulator  geom1_sdf: {geom1_sdf.shape}")
    # print(f"simulator  geom2_sdf: {geom2_sdf.shape}")

    geom1_gradient = material_property[..., 0:3].squeeze()
    geom2_gradient = material_property[..., 4:7].squeeze()

    # print(f"simulator  geom1_gradient: {geom1_gradient.shape}")
    # print(f"simulator  geom2_gradient: {geom2_gradient.shape}")

    geom1_sdf = - geom1_sdf
    geom2_sdf = geom2_sdf
    geom1_gradient = geom1_gradient
    geom2_gradient = - geom2_gradient
    # print(f"simulator  geom1_gradient: {geom1_gradient}")
    # print(f"simulator  geom2_gradient: {geom2_gradient}")

    # sdf_values_list.append(geom1_sdf)
    # sdf_values_list.append(geom2_sdf)
    #
    # normals_list.append(geom1_gradient)
    # normals_list.append(geom2_gradient)
    #
    # # 对于每个粒子，找出最近的边界（最小的绝对SDF值）
    # all_sdf = torch.stack(sdf_values_list, dim=1)  # [nparticles, n_boundaries]
    # all_normals = torch.stack(normals_list, dim=1)  # [nparticles, n_boundaries, 3]
    #
    # # 找到最近边界索引
    # abs_sdf = torch.abs(all_sdf)
    # min_indices = torch.argmin(abs_sdf, dim=1)  # [nparticles]
    #
    # # 选择最近的边界特征
    # sdf_values = all_sdf.gather(1, min_indices.unsqueeze(1)).squeeze(1)
    # normals = all_normals[torch.arange(all_normals.size(0)), min_indices]  # [nparticles, 3]

    distance_feature1 = geom1_sdf.unsqueeze(-1)
    distance_feature2 = geom2_sdf.unsqueeze(-1)
    normalized_normals1 = geom1_gradient
    normalized_normals2 = geom2_gradient

    # geom1_sdf = geom1_sdf.view(nparticles, -1)
    # geom2_sdf = geom2_sdf.view(nparticles, -1)
    # print(f"simulator  position_sequence: {position_sequence}")
    #
    # print(f"simulator  geom1_sdf: {geom1_sdf}")
    # print(f"simulator  geom2_sdf: {geom2_sdf}")
    # print(f"simulator  abs_sdf: {abs_sdf}")
    # print(f"simulator  min_indices: {min_indices}")
    # print(f"simulator  sdf_values: {sdf_values}")

    # print(f"simulator  distance_feature1: {distance_feature1}")
    # print(f"simulator  normals1: {normalized_normals1}")
    # print(f"simulator  distance_feature2: {distance_feature2}")
    # print(f"simulator  normals2: {normalized_normals2}")
    #print(f"simulator  normalized_normals: {normalized_normals}")

    #print(f"simulator geom1_gradient: {geom1_gradient[:2]}")

    # test_indices = (geom1_sdf.abs() < 0.005).nonzero().squeeze()
    # print("1法向量示例:", geom1_gradient[test_indices[:5]])
    #
    # test_indices = (geom2_sdf.abs() < 0.005).nonzero().squeeze()
    # print("2法向量示例:", geom2_gradient[test_indices[:5]])

    boundary_features = torch.cat([
        torch.clamp(
            distance_feature1,
            -self._boundary_clamp_limit,
            self._boundary_clamp_limit
        ),
        torch.clamp(
            normalized_normals1,
            -self._boundary_clamp_limit,
            self._boundary_clamp_limit
        ),
        torch.clamp(
            distance_feature2,
            -self._boundary_clamp_limit,
            self._boundary_clamp_limit
        ),
        torch.clamp(
            normalized_normals2,
            -self._boundary_clamp_limit,
            self._boundary_clamp_limit
        ),

    ], dim=-1)  # [nparticles, 8]

    # 添加处理后的边界特征
    node_features.append(boundary_features)

    if self._sdf_fn is not None:
        # 计算STL特征（距离和法向量）
        static_sdf_values, static_normals = self._compute_stl_features(most_recent_position)

        # 创建特征向量：距离 + XYZ法向量 (共4维特征)
        distance_feature = static_sdf_values.unsqueeze(-1)
        normalized_normals = static_normals / self._connectivity_radius

        static_boundary_features = torch.cat([
            torch.clamp(
                distance_feature / self._connectivity_radius,
                -self._boundary_clamp_limit,
                self._boundary_clamp_limit
            ),
            torch.clamp(
                normalized_normals,
                -self._boundary_clamp_limit,
                self._boundary_clamp_limit
            )
        ], dim=-1)  # [nparticles, 4]
    else:
        # 没有STL时使用原有方形边界计算
        boundaries = torch.tensor(
            self._boundaries, requires_grad=False).float().to(self._device)
        distance_to_lower_boundary = (
                most_recent_position - boundaries[:, 0][None])
        distance_to_upper_boundary = (
                boundaries[:, 1][None] - most_recent_position)
        distance_to_boundaries = torch.cat(
            [distance_to_lower_boundary, distance_to_upper_boundary], dim=1)
        static_boundary_features = torch.clamp(
            distance_to_boundaries / self._connectivity_radius,
            -self._boundary_clamp_limit, self._boundary_clamp_limit)

    node_features.append(static_boundary_features)


    # # 创建新的边界特征组，包含SDF和梯度信息
    # boundary_features = []
    # #
    # # 几何体1的特征：SDF + 梯度
    # normalized_geom1_sdf = torch.clamp(
    #     geom1_sdf / self._connectivity_radius,
    #     -self._boundary_clamp_limit, self._boundary_clamp_limit
    # )
    # print(f"simulator  normalized_geom1_sdf: {normalized_geom1_sdf.shape}")
    # boundary_features.append(normalized_geom1_sdf)
    # boundary_features.append(geom1_gradient)
    #
    # # 几何体2的特征：SDF + 梯度
    # normalized_geom2_sdf = torch.clamp(
    #     geom2_sdf / self._connectivity_radius,
    #     -self._boundary_clamp_limit, self._boundary_clamp_limit
    # )
    # boundary_features.append(normalized_geom2_sdf)
    # boundary_features.append(geom2_gradient)
    #
    # # 合并所有边界特征
    # combined_boundary_features = torch.cat(boundary_features, dim=1)

    # Normalized clipped distances to lower and upper boundaries.
    # boundaries are an array of shape [num_dimensions, 2], where the second
    # axis, provides the lower/upper boundaries.
    # boundaries = torch.tensor(
    #     self._boundaries, requires_grad=False).float().to(self._device)
    # distance_to_lower_boundary = (
    #     most_recent_position - boundaries[:, 0][None])
    # distance_to_upper_boundary = (
    #     boundaries[:, 1][None] - most_recent_position)
    # distance_to_boundaries = torch.cat(
    #     [distance_to_lower_boundary, distance_to_upper_boundary], dim=1)
    # #print(f"simulator  distance_to_boundaries: {distance_to_boundaries}")
    # normalized_clipped_distance_to_boundaries = torch.clamp(
    #     distance_to_boundaries,
    #     -self._boundary_clamp_limit, self._boundary_clamp_limit)
    # # The distance to 4 boundaries (top/bottom/left/right)
    # # node_features shape (nparticles, 10+4)
    # node_features.append(normalized_clipped_distance_to_boundaries)

    # Particle type
    if self._nparticle_types > 1:
      particle_type_embeddings = self._particle_type_embedding(particle_types)

      #print(f"simulator ecode particle_types: {particle_types}")
      node_features.append(particle_type_embeddings)

      #print(f"simulator ecode particle_type_embeddings: {particle_type_embeddings}")
    # Final node_features shape (nparticles, 30) for 2D (if material_property is not valid in training example)
    # 30 = 10 (5 velocity sequences*dim) + 4 boundaries + 16 particle embedding

    # Material property
    # if material_property is not None:
    #     most_recent_material_property = material_property[:, -1]
    #     material_property = material_property.view(nparticles, 8)
    #     node_features.append(material_property)
    # Final node_features shape (nparticles, 31) for 2D
    # 31 = 10 (5 velocity sequences*dim) + 4 boundaries + 16 particle embedding + 1 material property

    # Collect edge features.
    edge_features = []

    # Relative displacement and distances normalized to radius
    # with shape (nedges, 2)
    # normalized_relative_displacements = (
    #     torch.gather(most_recent_position, 0, senders) -
    #     torch.gather(most_recent_position, 0, receivers)
    # ) / self._connectivity_radius
    normalized_relative_displacements = (
        most_recent_position[senders, :] -
        most_recent_position[receivers, :]
    ) / self._connectivity_radius

    # Add relative displacement between two particles as an edge feature
    # with shape (nparticles, ndim)
    edge_features.append(normalized_relative_displacements)

    # Add relative distance between 2 particles with shape (nparticles, 1)
    # Edge features has a final shape of (nparticles, ndim + 1)
    normalized_relative_distances = torch.norm(
        normalized_relative_displacements, dim=-1, keepdim=True)
    edge_features.append(normalized_relative_distances)

    return (torch.cat(node_features, dim=-1),
            torch.stack([senders, receivers]),
            torch.cat(edge_features, dim=-1))

  def _decoder_postprocessor(
          self,
          normalized_acceleration: torch.tensor,
          position_sequence: torch.tensor,
          material_property: torch.tensor) -> torch.tensor:
    """ Compute new position based on acceleration and current position.
    The model produces the output in normalized space so we apply inverse
    normalization.

    Args:
      normalized_acceleration: Normalized acceleration (nparticles, dim).
      position_sequence: Position sequence of shape (nparticles, dim).

    Returns:
      torch.tensor: New position of the particles.

    """
    acceleration_stats = self._normalization_stats["acceleration"]
    acceleration = (normalized_acceleration * acceleration_stats['std']) + acceleration_stats['mean']

    # Use an Euler integrator to go from acceleration to position, assuming
    # a dt=1 corresponding to the size of the finite difference.
    most_recent_position = position_sequence[:, -1]
    most_recent_velocity = most_recent_position - position_sequence[:, -2]

    # TODO: Fix dt
    new_velocity = most_recent_velocity + acceleration  # * dt = 1
    new_position = most_recent_position + new_velocity  # * dt = 1

    return new_position

    # # 1. 加速度裁剪（防止数值爆炸）
    # MAX_ACC = 0.005  # 最大允许加速度
    # acceleration = torch.clamp(acceleration, -MAX_ACC, MAX_ACC)
    #
    # # 2. 使用更稳定的半隐式积分器
    # most_recent_position = position_sequence[:, -1]
    # prev_position = position_sequence[:, -2]
    # dt = 5e-6  # 假设dt=1
    #
    # # 当前速度 = (当前位 - 前一位) / dt
    # velocity = (most_recent_position - prev_position) / dt
    #
    # # 新速度 = 当前速度 + 加速度 * dt
    # new_velocity = velocity + acceleration * dt
    #
    # # 3. 速度限幅（防止飞散）
    # MAX_VEL = 0.032  # 最大速度
    # # 应用分速度方向限制
    # if self._connectivity_radius > 0:  # 防止除以零
    #     # 确定维度（2D还是3D）
    #     is_3d = (new_velocity.shape[1] == 3)
    #
    #     # 应用分量约束
    #     # x方向约束
    #     new_velocity_x = torch.clamp(new_velocity[:, 0],
    #                                  -self._connectivity_radius,
    #                                  self._connectivity_radius)
    #     # y方向约束
    #     new_velocity_y = torch.clamp(new_velocity[:, 1],
    #                                  -self._connectivity_radius,
    #                                  self._connectivity_radius)
    #
    #     # z方向约束（如果是3D）
    #     if is_3d:
    #         new_velocity_z = torch.clamp(new_velocity[:, 2],
    #                                      -MAX_VEL,
    #                                      MAX_VEL)
    #
    #     # 重新组合速度向量
    #     if is_3d:
    #         new_velocity = torch.stack([new_velocity_x, new_velocity_y, new_velocity_z], dim=1)
    #     else:
    #         new_velocity = torch.stack([new_velocity_x, new_velocity_y], dim=1)
    #
    #     # 原始模长约束保持
    #     velocity_norms = torch.norm(new_velocity, dim=1, keepdim=True)
    #     scale_factors = torch.min(torch.tensor(1.0, device=new_velocity.device),
    #                               MAX_VEL / (velocity_norms + 1e-6))
    #     safe_new_velocity = new_velocity * scale_factors
    # else:
    #     safe_new_velocity = new_velocity
    #
    # # 4. 计算新位置 = 当前位置 + 安全新速度 * dt
    # new_position = most_recent_position + safe_new_velocity * dt
    #
    # # 5. 二次位置稳定性检查
    # # 计算位移大小
    # displacement = torch.norm(new_position - most_recent_position, dim=1)
    #
    # # 如果位移过大，使用线性插值回退
    # MAX_DISP = self._connectivity_radius * 0.1  # 最大位移不超过连接半径的10%
    # if MAX_DISP > 0:
    #     oversized = displacement > MAX_DISP
    #     if torch.any(oversized):
    #         direction = new_position - most_recent_position
    #         safe_direction = direction * (MAX_DISP / (displacement + 1e-6)).unsqueeze(-1)
    #         safe_position = most_recent_position + safe_direction
    #         new_position = torch.where(oversized.unsqueeze(-1), safe_position, new_position)
    #
    #         # 应用边界约束
    #         if material_property is not None:
    #             # 确保material_property是二维的
    #             if material_property.dim() == 1:
    #                 # 转换为一维特征为二维
    #                 material_property = material_property.unsqueeze(-1)
    #
    #             # 应用分离的边界约束
    #             new_position = self._apply_boundary_constraints(
    #                 new_position, material_property)
    # return new_position

  def predict_positions(
          self,
          current_positions: torch.tensor,
          nparticles_per_example: torch.tensor,
          particle_types: torch.tensor,
          material_property: torch.tensor) -> torch.tensor:
    """Predict position based on acceleration.

    Args:
      current_positions: Particle position history; indexed as
        (nparticles, nsteps, dim) downstream (the last two steps are used
        by the integrator).
      nparticles_per_example: Number of particles per example. Default is 2
        examples per batch.
      particle_types: Particle types with shape (nparticles).
      material_property: Shape is (nparticles, 8)

    Returns:
      next_positions (torch.tensor): Next position of particles.
    """
    # Bug fix: the previous else-branch called _encoder_preprocessor with only
    # three arguments when material_property was None, which always raised a
    # TypeError because that parameter had no default. Pass it through
    # unconditionally and let the preprocessor validate it.
    node_features, edge_index, edge_features = self._encoder_preprocessor(
        current_positions, nparticles_per_example, particle_types,
        material_property)

    # GNN forward pass produces a normalized acceleration per particle.
    predicted_normalized_acceleration = self._encode_process_decode(
        node_features, edge_index, edge_features)

    # Integrate the (denormalized) acceleration into the next position.
    next_positions = self._decoder_postprocessor(
        predicted_normalized_acceleration, current_positions, material_property)

    return next_positions

  def predict_accelerations(
          self,
          next_positions: torch.tensor,
          position_sequence_noise: torch.tensor,
          position_sequence: torch.tensor,
          nparticles_per_example: torch.tensor,
          particle_types: torch.tensor,
          material_property: torch.tensor):
    """Produces normalized and predicted acceleration targets.

    Args:
      next_positions: Tensor of shape (nparticles_in_batch, dim) with the
        positions the model should output given the inputs.
      position_sequence_noise: Tensor of the same shape as `position_sequence`
        with the noise to apply to each particle.
      position_sequence: A sequence of particle positions. Shape is
        (nparticles, 6, dim). Includes current + last 5 positions.
      nparticles_per_example: Number of particles per example. Default is 2
        examples per batch.
      particle_types: Particle types with shape (nparticles).
      material_property: Shape is (nparticles, 6, 4)

    Returns:
      Tensors of shape (nparticles_in_batch, dim) with the predicted and
        target normalized accelerations.
    """
    # Add noise to the input position sequence.
    noisy_position_sequence = position_sequence + position_sequence_noise

    # Bug fix: the previous else-branch called _encoder_preprocessor with only
    # three arguments when material_property was None, raising a TypeError
    # since that parameter had no default. Pass it through unconditionally.
    node_features, edge_index, edge_features = self._encoder_preprocessor(
        noisy_position_sequence, nparticles_per_example, particle_types,
        material_property)
    predicted_normalized_acceleration = self._encode_process_decode(
        node_features, edge_index, edge_features)

    # Calculate the target acceleration using an `adjusted_next_position`
    # that is shifted by the noise in the last input position. The inverted
    # Euler update in `_inverse_decoder_postprocessor` then produces:
    # * a target that does NOT correct for noise in the input positions
    #   (next_position_adjusted differs from the true next_position);
    # * a target that exactly corrects noise in the input velocity, since
    #   `next_position_adjusted - noisy_position_sequence[:, -1]` matches
    #   the ground-truth next velocity (the noise cancels).
    next_position_adjusted = next_positions + position_sequence_noise[:, -1]
    target_normalized_acceleration = self._inverse_decoder_postprocessor(
        next_position_adjusted, noisy_position_sequence)

    return predicted_normalized_acceleration, target_normalized_acceleration

  def _inverse_decoder_postprocessor(
          self,
          next_position: torch.tensor,
          position_sequence: torch.tensor):
    """Inverse of `_decoder_postprocessor`.

    Args:
      next_position: Tensor of shape (nparticles_in_batch, dim) with the
        positions the model should output given the inputs.
      position_sequence: A sequence of particle positions. Shape is
        (nparticles, 6, dim). Includes current + last 5 positions.

    Returns:
      normalized_acceleration (torch.tensor): Normalized acceleration.

    """
    previous_position = position_sequence[:, -1]
    previous_velocity = previous_position - position_sequence[:, -2]
    next_velocity = next_position - previous_position
    acceleration = next_velocity - previous_velocity

    acceleration_stats = self._normalization_stats["acceleration"]
    normalized_acceleration = (
        acceleration - acceleration_stats['mean']) / acceleration_stats['std']
    return normalized_acceleration

  def save(
          self,
          path: str = 'model.pt'):
    """Serialize the model's parameters to disk.

    Args:
      path: Model path
    """
    state = self.state_dict()
    torch.save(state, path)

  def load(
          self,
          path: str):
    """Restore model parameters from a checkpoint file (loaded onto CPU).

    Args:
      path: Model path
    """
    state = torch.load(path, map_location=torch.device('cpu'))
    self.load_state_dict(state)


def time_diff(
        position_sequence: torch.tensor) -> torch.tensor:
  """Velocity sequence as first-order finite differences along the time axis.

  Args:
    position_sequence: Input position sequence & shape(nparticles, 6 steps, dim)

  Returns:
    torch.tensor: Velocity sequence, shape (nparticles, nsteps - 1, dim).
  """
  # torch.diff along dim=1 is exactly seq[:, 1:] - seq[:, :-1].
  return torch.diff(position_sequence, dim=1)
