import torch
from tqdm import tqdm
import os
import sys
sys.path.append('gns')
from gns import learned_simulator


# Integer particle-type codes used to mask particles during rollout.
# NOTE(review): values presumably mirror the particle-type encoding of the
# GNS training data (stationary particles are pinned to known positions
# below) — confirm against the dataset's metadata.
KINEMATIC_PARTICLE = 6
STATIONARY_PARTICLE = 3


# def rollout_with_checkpointing(
#         simulator: learned_simulator.LearnedSimulator,
#         initial_positions: torch.tensor,
#         particle_types: torch.tensor,
#         n_particles_per_example: torch.tensor,
#         nsteps: int,
#         checkpoint_interval: int = 1,
#         material_property: torch.tensor = None,
#         knwon_positions: torch.tensor = None
# ):
#     """ Rollout with gradient checkpointing to reduce memory accumulation over the forward steps during backpropagation.
#     Args:
#       simulator: learned_simulator
#       initial_positions: initial positions of particles for 6 timesteps with shape=(nparticles, 6, ndims).
#       particle_types: particle types shape=(nparticles, ).
#       n_particles_per_example: number of particles.
#       nsteps: number of forward steps to rollout.
#       checkpoint_interval: frequency of gradient checkpointing.
#       material_property: Friction angle normalized by tan() with shape (nparticles, )
#     Returns:
#       GNS rollout of particles positions
#     """
#
#     current_positions = initial_positions
#     predictions = []
#
#     for step in tqdm(range(nsteps), total=nsteps):
#         if step % checkpoint_interval == 0:
#             next_position = torch.utils.checkpoint.checkpoint(
#                 simulator.predict_positions,
#                 current_positions,
#                 [n_particles_per_example],
#                 particle_types,
#                 material_property
#             )
#         else:
#             next_position = simulator.predict_positions(
#                 current_positions,
#                 [n_particles_per_example],
#                 particle_types,
#                 material_property
#             )
#
#         stationary_mask = (particle_types == STATIONARY_PARTICLE).clone().detach()
#         stationary_mask = stationary_mask.bool()[:, None].expand(-1, current_positions.shape[-1])
#         # torch.where(condition, x, y): each element of the result is taken
#         # from x where condition is True, and from y where it is False.
#         next_position = torch.where(
#             stationary_mask, knwon_positions, next_position)
#         predictions.append(next_position)
#
#         # Shift `current_positions`, removing the oldest position in the sequence
#         # and appending the next position at the end.
#         current_positions = torch.cat(
#             [current_positions[:, 1:], next_position[:, None, :]], dim=1)
#
#     return torch.cat(
#         (initial_positions.permute(1, 0, 2), torch.stack(predictions))
#     )

def rollout_with_checkpointing(
        simulator: "learned_simulator.LearnedSimulator",
        initial_positions: torch.Tensor,
        particle_types: torch.Tensor,
        n_particles_per_example: torch.Tensor,
        nsteps: int,
        checkpoint_interval: int = 1,
        knwon_positions: torch.Tensor = None,  # NOTE(review): typo for "known_positions"; kept for caller compatibility
        sequence_length: int = None,
        material_property: torch.Tensor = None,
        post_step_fn: callable = None,
        material_property_fn: callable = None
):
    """Rollout with gradient checkpointing to reduce memory accumulation over
    the forward steps during backpropagation.

    Args:
      simulator: learned simulator exposing ``predict_positions``.
      initial_positions: initial particle positions with
        shape=(nparticles, input_sequence_length, ndims).
      particle_types: particle types, shape=(nparticles,).
      n_particles_per_example: number of particles (tensor or int).
      nsteps: number of forward steps to rollout.
      checkpoint_interval: frequency of gradient checkpointing; steps where
        ``step % checkpoint_interval == 0`` run under
        ``torch.utils.checkpoint.checkpoint`` (step 0 always does).
      knwon_positions: known positions substituted for stationary particles
        (type ``STATIONARY_PARTICLE``). Used directly in ``torch.where``
        against a (nparticles, ndims) mask each step — presumably
        shape=(nparticles, ndims); TODO confirm with callers.
      sequence_length: length of the input sequence; defaults to
        ``initial_positions.shape[1]``.
      material_property: optional per-particle material properties; a 2-D
        tensor is broadcast over the time dimension.
      post_step_fn: optional hook applied to each predicted position,
        called as ``post_step_fn(current_positions, next_position,
        particle_types, step)``.
      material_property_fn: optional function generating material properties
        per step, called as ``material_property_fn(current_positions, step)``.

    Returns:
      Full trajectory with shape=(input_sequence_length + nsteps,
      nparticles, ndims): the initial positions followed by the predictions.
    """
    # Ensure float32 positions and integer particle types.
    current_positions = initial_positions.float()
    predictions = []
    device = initial_positions.device
    nsequence = initial_positions.shape[1]

    particle_types = particle_types.long()

    # Broadcast a static 2-D material property over the time dimension.
    if material_property is not None:
        material_property = material_property.to(device).float()
        if material_property.dim() == 2:
            material_property = material_property.unsqueeze(1).expand(-1, nsequence, -1)

    if sequence_length is None:
        sequence_length = nsequence

    # Resolve the particle count once — it is loop-invariant, so there is no
    # reason to re-check/convert it on every rollout step.
    if isinstance(n_particles_per_example, torch.Tensor):
        n_particles_per_example = n_particles_per_example.item()

    # Initial material property: dynamic (per-step function) or static.
    if material_property_fn is not None:
        mat_prop = material_property_fn(current_positions, 0)
    else:
        mat_prop = material_property

    # Ensure mat_prop is 3-D: (nparticles, sequence_length, features).
    if mat_prop is not None and mat_prop.dim() == 2:
        mat_prop = mat_prop.unsqueeze(1).expand(-1, sequence_length, -1)

    for step in range(nsteps):
        # Regenerate the material property for this step when dynamic.
        if material_property_fn is not None:
            mat_prop = material_property_fn(current_positions, step)
            if mat_prop.dim() == 2:
                mat_prop = mat_prop.unsqueeze(1).expand(-1, sequence_length, -1)

        # Checkpoint the forward pass every `checkpoint_interval` steps to
        # trade recomputation for activation memory.
        if step % checkpoint_interval == 0:
            next_position = torch.utils.checkpoint.checkpoint(
                simulator.predict_positions,
                current_positions,
                n_particles_per_example,  # plain int, not a tensor
                particle_types,
                mat_prop
            )
        else:
            next_position = simulator.predict_positions(
                current_positions,
                n_particles_per_example,  # plain int, not a tensor
                particle_types,
                mat_prop
            )

        # Optional post-processing hook.
        if post_step_fn is not None:
            next_position = post_step_fn(current_positions, next_position, particle_types, step)

        # Pin stationary particles to their known positions. Uses the
        # module-level STATIONARY_PARTICLE constant (previously shadowed by a
        # redundant local redefinition).
        if knwon_positions is not None:
            stationary_mask = (particle_types == STATIONARY_PARTICLE)
            stationary_mask = stationary_mask.bool()[:, None].expand(-1, next_position.shape[-1])
            next_position = torch.where(
                stationary_mask, knwon_positions, next_position)

        predictions.append(next_position)

        # Shift `current_positions`, dropping the oldest position in the
        # sequence and appending the new prediction at the end.
        current_positions = torch.cat(
            [current_positions[:, 1:], next_position[:, None, :]], dim=1)

    # Stitch the initial sequence (time-major) onto the predictions.
    initial_positions_permuted = initial_positions.permute(1, 0, 2)
    predictions_tensor = torch.stack(predictions)
    full_trajectory = torch.cat([initial_positions_permuted, predictions_tensor], dim=0)

    return full_trajectory
