import collections
import json

import os
os.environ["PL_TORCH_DISTRIBUTED_BACKEND"] = "gloo"
#os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
#os.environ["CUDA_VISIBLE_DEVICES"] = "0" # GPU index
import pickle
import glob
import re
import sys
import torch.nn.functional as F
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm

from absl import flags
from absl import app

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from gns import learned_simulator
from gns import noise_utils
from gns import reading_utils
from gns import data_loader
#from gns import data_loader_origin
from gns import distribute

# Physical properties per particle-type id, used by the (currently disabled)
# position-centered loss to derive particle masses.
PARTICLE_PROPERTIES = {
    6: {"density": 2680.0, "radius": 0.002},  # sand: density (kg/m³), radius (m)
    0: {"density": 2196.0, "radius": 0.004},  # solid: density (kg/m³), radius (m)
}
# Weights for the commented-out position / centroid loss terms.
pos_weight = 0.6
centroid_weight = 0.4

# Command-line flags (absl). Accessed through the module-level FLAGS below.
flags.DEFINE_enum(
    'mode', 'train', ['train', 'valid', 'rollout'],
    help='Train model, validation or rollout evaluation.')
flags.DEFINE_integer('batch_size', 1, help='The batch size.')
flags.DEFINE_float('noise_std', 6.7e-4, help='The std deviation of the noise.')
flags.DEFINE_string('data_path', None, help='The dataset directory.')
flags.DEFINE_string('model_path', 'models/', help=('The path for saving checkpoints of the model.'))
flags.DEFINE_string('output_path', 'rollouts/', help='The path for saving outputs (e.g. rollouts).')
flags.DEFINE_string('output_filename', 'rollout', help='Base name for saving the rollout')
flags.DEFINE_string('model_file', None, help=('Model filename (.pt) to resume from. Can also use "latest" to default to newest file.'))
flags.DEFINE_string('train_state_file', 'train_state.pt', help=('Train state filename (.pt) to resume from. Can also use "latest" to default to newest file.'))

flags.DEFINE_integer('ntraining_steps', int(2E7), help='Number of training steps.')
flags.DEFINE_integer('validation_interval', None, help='Validation interval. Set `None` if validation loss is not needed')
flags.DEFINE_integer('nsave_steps', int(2500), help='Number of steps at which to save the model.')

# Learning rate parameters
flags.DEFINE_float('lr_init', 1e-4, help='Initial learning rate.')
flags.DEFINE_float('lr_decay', 0.1, help='Learning rate decay.')
flags.DEFINE_integer('lr_decay_steps', int(2e5), help='Learning rate decay steps.')

flags.DEFINE_integer("cuda_device_number", 0, help="CUDA device (zero indexed), default is None so default CUDA device will be used.")
flags.DEFINE_integer("n_gpus", 1, help="The number of GPUs to utilize for training.")

FLAGS = flags.FLAGS

Stats = collections.namedtuple('Stats', ['mean', 'std'])

INPUT_SEQUENCE_LENGTH = 6  # So we can calculate the last 5 velocities.
NUM_PARTICLE_TYPES = 9  # Size of the particle-type embedding table.
KINEMATIC_PARTICLE_ID = 3  # Type id whose trajectory is prescribed, not predicted.

def rollout(
        simulator: learned_simulator.LearnedSimulator,
        position: torch.tensor,
        particle_types: torch.tensor,
        material_property: torch.tensor,
        n_particles_per_example: torch.tensor,
        nsteps: int,
        device: torch.device):
  """
  Rolls out a trajectory by applying the model in sequence.

  Args:
    simulator: Learned simulator.
    position: Positions of particles (nparticles, timesteps, ndims).
    particle_types: Particles types with shape (nparticles)
    material_property: Per-particle material feature. It is sliced along
      dim 1 per rollout step below, so it is expected to carry a time
      dimension at index 1 — TODO confirm against the data loader.
    n_particles_per_example
    nsteps: Number of steps.
    device: torch device.

  Returns:
    Tuple of (output_dict, loss) where output_dict holds numpy arrays for the
    initial/predicted/ground-truth positions, particle types and material
    property, and loss is the element-wise squared position error.
  """
  initial_positions = position[:, :INPUT_SEQUENCE_LENGTH]
  #initial_sdfs = material_property[:, :1]
  ground_truth_positions = position[:, INPUT_SEQUENCE_LENGTH:]

  # Keep the full ground-truth material sequence so every rollout step can
  # look up the material values for that step.
  ground_truth_sdfs = material_property[:, INPUT_SEQUENCE_LENGTH:]  # key change
  #ground_truth_sdfs = material_property[:, (INPUT_SEQUENCE_LENGTH - 5):]  # key change: take 5 extra earlier time steps

  #ground_truth_sdfs = material_property[:, INPUT_SEQUENCE_LENGTH:]
  # print(f"train r initial_positions: {initial_positions}")
  # print(f"train r initial_sdfs: {initial_sdfs}")

  current_positions = initial_positions
  #current_material_window = ground_truth_sdfs[:, :INPUT_SEQUENCE_LENGTH]  # initialize the material window (first 6 time steps)
  #current_sdfs = initial_sdfs
  predictions = []
  #print(f"train  ground_truth_sdfs: {ground_truth_sdfs.shape}")


  for step in tqdm(range(nsteps), total=nsteps):
  #for step in tqdm(range(591), total=591):
    # Get next position with shape (nnodes, dim)
    # Take this step's material from the saved ground-truth sequence.
    # NOTE(review): after squeeze(1) this is (nparticles, nfeatures) — confirm.
    current_material = ground_truth_sdfs[:, step: step+1].squeeze(1)
    #current_material = current_material_window[:, step:step + INPUT_SEQUENCE_LENGTH]  # (batch, 6, 8)
    #print(f"train r current_material: {current_material.shape}")

    next_position = simulator.predict_positions(
        current_positions,
        nparticles_per_example=[n_particles_per_example],
        particle_types=particle_types,
        material_property=current_material
    )
    #print(f"train r particle_types: {particle_types}")

    # Update kinematic particles from prescribed trajectory.
    kinematic_mask = (particle_types == KINEMATIC_PARTICLE_ID).clone().detach().to(device)
    next_position_ground_truth = ground_truth_positions[:, step]
    kinematic_mask = kinematic_mask.bool()[:, None].expand(-1, current_positions.shape[-1])
    next_position = torch.where(
        kinematic_mask, next_position_ground_truth, next_position)
    predictions.append(next_position)

    # Shift `current_positions`, removing the oldest position in the sequence
    # and appending the next position at the end.
    current_positions = torch.cat(
        [current_positions[:, 1:], next_position[:, None, :]], dim=1)

    # Update the material window: append the next step's ground-truth material.
    # if step < nsteps - 1:  # guard against out-of-range indexing
    #   next_step_material = ground_truth_sdfs[:, (step + INPUT_SEQUENCE_LENGTH) : (step + INPUT_SEQUENCE_LENGTH + 1)]  # next time step's material
    #   current_material_window = torch.cat(
    #       [current_material_window[:, 1:], next_step_material], dim=1)


  # Predictions with shape (time, nnodes, dim)
  predictions = torch.stack(predictions)
  # Reorder ground truth to time-major to match `predictions`.
  ground_truth_positions = ground_truth_positions.permute(1, 0, 2)

  # Element-wise squared position error, shape (time, nnodes, dim).
  loss = (predictions - ground_truth_positions) ** 2


  output_dict = {
      'initial_positions': initial_positions.permute(1, 0, 2).cpu().numpy(),
      'predicted_rollout': predictions.cpu().numpy(),
      'ground_truth_rollout': ground_truth_positions.cpu().numpy(),
      'particle_types': particle_types.cpu().numpy(),
      'material_property': material_property.cpu().numpy() if material_property is not None else None
  }

  return output_dict, loss


def predict(device: str):
  """Run rollout prediction / evaluation over a whole dataset split.

  Loads the trained simulator from `FLAGS.model_path + FLAGS.model_file`,
  rolls out every trajectory in the chosen split, prints per-example and
  mean losses, and (in 'rollout' mode) pickles each rollout to
  `FLAGS.output_path`.

  Args:
    device: Torch device to run inference on.

  Raises:
    Exception: If the model checkpoint does not exist.
  """
  # Read metadata
  metadata = reading_utils.read_metadata(FLAGS.data_path, "rollout")
  simulator = _get_simulator(metadata, FLAGS.noise_std, FLAGS.noise_std, device)

  # Load simulator
  if os.path.exists(FLAGS.model_path + FLAGS.model_file):
    simulator.load(FLAGS.model_path + FLAGS.model_file)
  else:
    raise Exception(f"Model does not exist at {FLAGS.model_path + FLAGS.model_file}")

  simulator.to(device)
  simulator.eval()

  # Output path
  if not os.path.exists(FLAGS.output_path):
    os.makedirs(FLAGS.output_path)

  # Use `valid` set for eval mode if available, otherwise fall back to `test`.
  # Bug fix: the isfile() check previously used a plain (non-f) string literal,
  # so it always returned False and `valid` mode silently evaluated on `test`.
  split = 'test' if (FLAGS.mode == 'rollout' or (not os.path.isfile(f"{FLAGS.data_path}valid.npz"))) else 'valid'

  # Get dataset
  #ds = data_loader_origin.get_data_loader_by_trajectories(path=f"{FLAGS.data_path}{split}.npz")
  ds = data_loader.get_data_loader_by_trajectories(path=f"{FLAGS.data_path}{split}.npz")
  # See if our dataset has material property as feature
  if len(ds.dataset._data[0]) == 3:  # `ds` has (positions, particle_type, material_property)
    material_property_as_feature = True
  elif len(ds.dataset._data[0]) == 2:  # `ds` only has (positions, particle_type)
    material_property_as_feature = False
  else:
    raise NotImplementedError

  eval_loss = []
  with torch.no_grad():
    for example_i, features in enumerate(ds):
      print(f"processing example number {example_i}")
      positions = features[0].to(device)
      if metadata['sequence_length'] is not None:
        # If `sequence_length` is predefined in metadata,
        nsteps = metadata['sequence_length'] - INPUT_SEQUENCE_LENGTH
      else:
        # If no predefined `sequence_length`, then get the sequence length
        sequence_length = positions.shape[1]
        nsteps = sequence_length - INPUT_SEQUENCE_LENGTH
      particle_type = features[1].to(device)
      if material_property_as_feature:
        material_property = features[2].to(device)
        n_particles_per_example = torch.tensor([int(features[3])], dtype=torch.int32).to(device)
      else:
        material_property = None
        n_particles_per_example = torch.tensor([int(features[2])], dtype=torch.int32).to(device)
      print(f"train nparticles_per_example: {n_particles_per_example}")

      # Predict example rollout
      example_rollout, loss = rollout(simulator,
                                      positions,
                                      particle_type,
                                      material_property,
                                      n_particles_per_example,
                                      nsteps,
                                      device)

      example_rollout['metadata'] = metadata
      print("Predicting example {} loss: {}".format(example_i, loss.mean()))
      eval_loss.append(torch.flatten(loss))

      # Save rollout in testing
      if FLAGS.mode == 'rollout':
        example_rollout['metadata'] = metadata
        example_rollout['loss'] = loss.mean()
        filename = f'{FLAGS.output_filename}_ex{example_i}.pkl'
        filename = os.path.join(FLAGS.output_path, filename)
        with open(filename, 'wb') as f:
          pickle.dump(example_rollout, f)

  print("Mean loss on rollout prediction: {}".format(
      torch.mean(torch.cat(eval_loss))))


def optimizer_to(optim, device):
  """Move all tensors held in an optimizer's state onto `device`, in place.

  Args:
    optim: A torch optimizer whose `state` tensors should be relocated.
    device: Target torch device (or device id).
  """
  def _move_tensor(t):
    # Relocate both the tensor data and its gradient, when present.
    t.data = t.data.to(device)
    if t._grad is not None:
      t._grad.data = t._grad.data.to(device)

  for state_entry in optim.state.values():
    # Not sure there are any global tensors in the state dict
    if isinstance(state_entry, torch.Tensor):
      _move_tensor(state_entry)
    elif isinstance(state_entry, dict):
      for value in state_entry.values():
        if isinstance(value, torch.Tensor):
          _move_tensor(value)

def acceleration_loss(pred_acc, target_acc, non_kinematic_mask):
  """
  Compute the loss between predicted and target accelerations.

  The per-particle squared error is summed over the last dimension, zeroed
  for kinematic particles, and averaged over the non-kinematic count.

  Args:
    pred_acc: Predicted accelerations, shape (nparticles, dim).
    target_acc: Target accelerations, shape (nparticles, dim).
    non_kinematic_mask: Mask for kinematic particles; falsy entries
      contribute zero loss.

  Returns:
    Scalar tensor with the masked mean squared acceleration error
    (0 when the mask selects no particles).
  """
  loss = (pred_acc - target_acc) ** 2
  loss = loss.sum(dim=-1)
  num_non_kinematic = non_kinematic_mask.sum()
  loss = torch.where(non_kinematic_mask.bool(),
                    loss, torch.zeros_like(loss))
  # Guard against division by zero when every particle is kinematic
  # (previously this returned NaN).
  loss = loss.sum() / num_non_kinematic.clamp(min=1)
  return loss
#
# def position_centered_loss(pred_acc, target_acc,
#                           predicted_next_position, next_positions_target,
#                           particle_types, non_kinematic_mask,
#                           pos_weight, centroid_weight):
#     """
#     优化后的损失函数
#     """
#     # 计算质量（保持不变）
#     masses = torch.zeros_like(pred_acc[:, 0])
#     for ptype, properties in PARTICLE_PROPERTIES.items():
#         mask = (particle_types == ptype)
#         density = properties["density"]
#         radius = properties["radius"]
#         volume = (4 / 3) * torch.pi * (radius ** 3)
#         mass = density * volume
#         masses[mask] = mass
#
#     """1. 加速度损失（核心）"""
#     acc_loss = (pred_acc - target_acc) ** 2
#     acc_loss = acc_loss.sum(dim=-1)
#     num_non_kinematic = non_kinematic_mask.sum()
#     acc_loss = torch.where(non_kinematic_mask.bool(),
#                            acc_loss, torch.zeros_like(acc_loss))
#     acc_loss = acc_loss.sum() / num_non_kinematic
#
#     """2. 位置损失（重要）"""
#     target_position = next_positions_target
#     pred_position = predicted_next_position
#     pos_loss = (pred_position - target_position) ** 2
#     pos_loss = pos_loss.sum(dim=-1)
#     pos_loss = torch.where(non_kinematic_mask.bool(),
#                            pos_loss, torch.zeros_like(pos_loss))
#     pos_loss = pos_loss.sum() / num_non_kinematic
#
#     """4. 颗粒穿透惩罚（重要）"""
#     particle_radii = torch.zeros_like(particle_types, dtype=torch.float32)
#     particle_radii[particle_types == 0] = 0.004
#     particle_radii[particle_types == 6] = 0.002
#
#     intern_diff = predicted_next_position.unsqueeze(1) - predicted_next_position.unsqueeze(0)
#     distances = torch.sqrt(torch.sum(intern_diff ** 2, dim=-1) + 1e-8)
#     radii_sum = particle_radii.unsqueeze(1) + particle_radii.unsqueeze(0)
#
#     relative_overlap = (radii_sum - distances) / (radii_sum + 1e-8)
#     violation = torch.where(
#         relative_overlap > 0,
#         torch.exp(relative_overlap) - 1.0,
#         torch.zeros_like(distances)
#     )
#
#     mask = torch.eye(distances.shape[0], device=distances.device).bool()
#     violation = violation.masked_fill(mask, 0)
#
#     non_kinematic_mask_expanded = non_kinematic_mask.bool().unsqueeze(1) & non_kinematic_mask.bool().unsqueeze(0)
#     inter_penalty = torch.where(non_kinematic_mask_expanded,
#                                 violation,
#                                 torch.zeros_like(violation))
#     inter_penalty = inter_penalty.sum() / (non_kinematic_mask.sum() * (non_kinematic_mask.sum() - 1) + 1e-8)
#
#     """6. 质心对齐损失（可选）"""
#     target_centroid = next_positions_target[non_kinematic_mask.bool()].mean(dim=0)
#     pred_centroid = predicted_next_position[non_kinematic_mask.bool()].mean(dim=0)
#     centroid_diff = pred_centroid - target_centroid
#     centroid_loss = torch.norm(centroid_diff) ** 2  # 改为平方距离
#
#     """组合损失"""
#     total_loss = (
#         acc_loss +
#         pos_weight * pos_loss +
#         inter_penalty +
#         centroid_weight * centroid_loss
#     )
#
#     return total_loss

def cylinder_boundary_loss(positions, particle_types, non_kinematic_mask):
    """Penalty for non-kinematic particles penetrating a cylindrical drum.

    The drum axis runs along x through (y, z) = (0.055, 0.055). Particles must
    stay inside the inner radius and between the two end caps at x = 0 and
    x = 0.03. Each term is the mean penetration depth (meters), 0 when no
    particle violates the corresponding boundary.

    Args:
      positions: (nparticles, 3) particle center positions.
      particle_types: (nparticles,) integer particle-type ids.
      non_kinematic_mask: (nparticles,) mask; only truthy entries are penalized.

    Returns:
      Scalar tensor: radial + bottom-cap + top-cap mean penetration.
    """
    # Cylinder geometry
    AXIS_POINT = torch.tensor([0.015, 0.055, 0.055], device=positions.device)
    CYLINDER_RADIUS = 0.05  # inner radius (m)
    X_MIN = 0.0
    X_MAX = 0.03

    # Per-particle radii by type (see PARTICLE_PROPERTIES).
    particle_radii = torch.zeros_like(positions[:, 0])
    particle_radii[particle_types == 0] = 0.004  # solid particle radius
    particle_radii[particle_types == 6] = 0.002  # sand particle radius

    # Only penalize non-kinematic particles.
    positions = positions[non_kinematic_mask.bool()]
    particle_radii = particle_radii[non_kinematic_mask.bool()]

    # 1. Distance to the axis in the YZ plane.
    #    Fix: the axis YZ offset was previously hard-coded a second time;
    #    reuse AXIS_POINT so the geometry is defined in one place.
    yz_positions = positions[:, 1:] - AXIS_POINT[1:]
    distance_to_center = torch.norm(yz_positions, dim=1)

    # 2. Radial constraint: the particle surface (center distance + radius)
    #    must not exceed the cylinder's inner radius.
    radial_penetration = torch.relu((distance_to_center + particle_radii) - CYLINDER_RADIUS)

    # 3. Axial constraints against the two end caps.
    # Bottom cap (x = 0)
    bottom_penetration = torch.relu(-(positions[:, 0] - particle_radii - X_MIN))

    # Top cap (x = 0.03)
    top_penetration = torch.relu((positions[:, 0] + particle_radii) - X_MAX)

    # 4. Combined boundary loss
    boundary_loss = torch.mean(radial_penetration) + torch.mean(bottom_penetration) + torch.mean(top_penetration)

    return boundary_loss


def position_centered_loss(pred_acc, target_acc, predicted_next_position, particle_types, non_kinematic_mask):
    """Masked acceleration MSE plus a cylindrical boundary penalty.

    Args:
      pred_acc: Predicted accelerations, shape (nparticles, dim).
      target_acc: Target accelerations, shape (nparticles, dim).
      predicted_next_position: Predicted next positions, shape (nparticles, 3).
      particle_types: (nparticles,) integer particle-type ids.
      non_kinematic_mask: Mask; kinematic particles contribute zero loss.

    Returns:
      Scalar tensor: acceleration loss + boundary loss.
    """
    mask = non_kinematic_mask.bool()
    n_free = non_kinematic_mask.sum()

    # 1. Per-particle squared acceleration error, zeroed on kinematic particles.
    per_particle = ((pred_acc - target_acc) ** 2).sum(dim=-1)
    per_particle = torch.where(mask, per_particle, torch.zeros_like(per_particle))
    acc_term = per_particle.sum() / n_free

    # 2. Penalty for predicted positions that leave the drum geometry.
    boundary_term = cylinder_boundary_loss(
        predicted_next_position,
        particle_types,
        non_kinematic_mask
    )

    # 3. Combined loss
    return acc_term + boundary_term

def save_model_and_train_state(rank, device, simulator, flags, step, epoch, optimizer,
                                train_loss, valid_loss, train_loss_hist, valid_loss_hist):
  """Persist the simulator weights and optimizer/training state to disk.

  Only rank 0 (or a CPU run) writes files; other ranks return immediately.

  Args:
    rank: local rank
    device: torch device type
    simulator: Trained simulator (DDP-wrapped on GPU, plain module on CPU).
    flags: dict of runtime flags (uses "model_path")
    step: step
    epoch: epoch
    optimizer: optimizer
    train_loss: training loss at current step
    valid_loss: validation loss at current step
    train_loss_hist: training loss history at each epoch
    valid_loss_hist: validation loss history at each epoch
  """
  on_cpu = device == torch.device("cpu")
  if rank != 0 and not on_cpu:
    return

  model_file = flags["model_path"] + 'model-' + str(step) + '.pt'
  # On GPU the simulator is DDP-wrapped, so the real module lives in `.module`.
  if on_cpu:
    simulator.save(model_file)
  else:
    simulator.module.save(model_file)

  train_state = {
      "optimizer_state": optimizer.state_dict(),
      "global_train_state": {
          "step": step,
          "epoch": epoch,
          "train_loss": train_loss,
          "valid_loss": valid_loss,
      },
      "loss_history": {"train": train_loss_hist, "valid": valid_loss_hist},
  }
  torch.save(train_state, f'{flags["model_path"]}train_state-{step}.pt')
      
def train(rank, flags, world_size, device):
  """Train the model.

  Args:
    rank: local rank
    flags: dict of runtime flags (paths, lr schedule, noise std, etc.)
    world_size: total number of ranks
    device: torch device type
  """
  # On CUDA each rank drives one GPU; on CPU there is a single process.
  if device == torch.device("cuda"):
    distribute.setup(rank, world_size, device)
    device_id = rank
  else:
    device_id = device

  # Read metadata
  metadata = reading_utils.read_metadata(flags["data_path"], "train")

  # Get simulator and optimizer.
  # The learning rate is scaled by world_size (linear scaling rule for DDP).
  if device == torch.device("cuda"):
    serial_simulator = _get_simulator(metadata, flags["noise_std"], flags["noise_std"], rank)
    simulator = DDP(serial_simulator.to(rank), device_ids=[rank], output_device=rank)
    optimizer = torch.optim.Adam(simulator.parameters(), lr=flags["lr_init"]*world_size)
  else:
    simulator = _get_simulator(metadata, flags["noise_std"], flags["noise_std"], device)
    optimizer = torch.optim.Adam(simulator.parameters(), lr=flags["lr_init"] * world_size)

  # Initialize training state
  step = 0
  epoch = 0
  steps_per_epoch = 0

  valid_loss = None
  epoch_train_loss = 0
  epoch_valid_loss = None

  train_loss_hist = []
  valid_loss_hist = []

  # If model_path does exist and model_file and train_state_file exist continue training.
  if flags["model_file"] is not None:

    if flags["model_file"] == "latest" and flags["train_state_file"] == "latest":
      # find the latest model, assumes model and train_state files are in step.
      fnames = glob.glob(f'{flags["model_path"]}*model*pt')
      max_model_number = 0
      # NOTE(review): should be a raw string r".*model-(\d+).pt" to avoid the
      # invalid-escape-sequence DeprecationWarning; "\d" still matches today.
      expr = re.compile(".*model-(\d+).pt")
      for fname in fnames:
        model_num = int(expr.search(fname).groups()[0])
        if model_num > max_model_number:
          max_model_number = model_num
      # reset names to point to the latest.
      flags["model_file"] = f"model-{max_model_number}.pt"
      flags["train_state_file"] = f"train_state-{max_model_number}.pt"

    if os.path.exists(flags["model_path"] + flags["model_file"]) and os.path.exists(flags["model_path"] + flags["train_state_file"]):
      # load model (the DDP wrapper stores the real module in `.module`)
      if device == torch.device("cuda"):
        simulator.module.load(flags["model_path"] + flags["model_file"])
      else:
        simulator.load(flags["model_path"] + flags["model_file"])

      # load train state
      train_state = torch.load(flags["model_path"] + flags["train_state_file"])
      
      # set optimizer state (rebuilt so its param references match the
      # freshly-loaded model, then restored from the checkpoint)
      optimizer = torch.optim.Adam(
        simulator.module.parameters() if device == torch.device("cuda") else simulator.parameters())
      optimizer.load_state_dict(train_state["optimizer_state"])
      optimizer_to(optimizer, device_id)
      
      # set global train state
      step = train_state["global_train_state"]["step"]
      epoch = train_state["global_train_state"]["epoch"]
      train_loss_hist = train_state["loss_history"]["train"]
      valid_loss_hist = train_state["loss_history"]["valid"]

    else:
      msg = f'Specified model_file {flags["model_path"] + flags["model_file"]} and train_state_file {flags["model_path"] + flags["train_state_file"]} not found.'
      raise FileNotFoundError(msg)

  simulator.train()
  simulator.to(device_id)

  # Get data loader (distributed sampler on GPU, plain loader on CPU)
  get_data_loader = (
    distribute.get_data_distributed_dataloader_by_samples
    if device == torch.device("cuda")
    else data_loader.get_data_loader_by_samples
  )

  # Load training data
  dl = get_data_loader(
      path=f'{flags["data_path"]}train.npz',
      input_length_sequence=INPUT_SEQUENCE_LENGTH,
      batch_size=flags["batch_size"],
  )
  # 3 when material_property is included, 2 otherwise.
  n_features = len(dl.dataset._data[0])

  # Load validation data
  if flags["validation_interval"] is not None:
      dl_valid = get_data_loader(
          path=f'{flags["data_path"]}valid.npz',
          input_length_sequence=INPUT_SEQUENCE_LENGTH,
          batch_size=flags["batch_size"],
      )
      if len(dl_valid.dataset._data[0]) != n_features:
          raise ValueError(
              f"`n_features` of `valid.npz` and `train.npz` should be the same"
          )

  print(f"rank = {rank}, cuda = {torch.cuda.is_available()}")

  try:
    while step < flags["ntraining_steps"]:
      if device == torch.device("cuda"):
        torch.distributed.barrier()

      for example in dl:
        steps_per_epoch += 1
        # ((position, particle_type, material_property, n_particles_per_example), labels) are in dl
        position = example[0][0].to(device_id)
        particle_type = example[0][1].to(device_id)
        if n_features == 3:  # if dl includes material_property
          material_property = example[0][2].to(device_id)
          n_particles_per_example = example[0][3].to(device_id)
        elif n_features == 2:
          n_particles_per_example = example[0][2].to(device_id)
        else:
          raise NotImplementedError
        labels = example[1].to(device_id)

        # NOTE(review): Tensor.to() is not in-place — these two calls are
        # no-ops; the tensors were already moved above.
        n_particles_per_example.to(device_id)
        labels.to(device_id)

        # TODO (jpv): Move noise addition to data_loader
        # Sample the noise to add to the inputs to the model during training.
        sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence(position, noise_std_last_step=flags["noise_std"]).to(device_id)
        non_kinematic_mask = (particle_type != KINEMATIC_PARTICLE_ID).clone().detach().to(device_id)
        sampled_noise *= non_kinematic_mask.view(-1, 1, 1)

        # Get the predictions and target accelerations.
        # Under DDP the prediction methods live on `simulator.module`.
        device_or_rank = rank if device == torch.device("cuda") else device
        pred_acc, target_acc = (simulator.module.predict_accelerations if device == torch.device("cuda") else simulator.predict_accelerations)(
            next_positions=labels.to(device_or_rank),
            position_sequence_noise=sampled_noise.to(device_or_rank),
            position_sequence=position.to(device_or_rank),
            nparticles_per_example=n_particles_per_example.to(device_or_rank),
            particle_types=particle_type.to(device_or_rank),
            material_property=material_property.to(device_or_rank) if n_features == 3 else None
        )
        # NOTE(review): pred_pos is only consumed by the commented-out
        # position-centered losses below; it costs an extra forward pass.
        pred_pos = (simulator.module.predict_positions if device == torch.device(
            "cuda") else simulator.predict_positions)(
            current_positions=position,
            nparticles_per_example=n_particles_per_example.to(device_or_rank),
            particle_types=particle_type.to(device_or_rank),
            material_property=material_property.to(device_or_rank) if n_features == 3 else None,
        )
        #print(f"train main material_property {material_property.shape}")
        
        # Validation
        # NOTE(review): a validation batch is drawn every step even when no
        # validation runs this step — consider moving it inside the inner if.
        if flags["validation_interval"] is not None:
          sampled_valid_example = next(iter(dl_valid))
          if step > 0 and step % flags["validation_interval"] == 0:
              valid_loss = validation(
                simulator, sampled_valid_example, n_features, flags, rank, device_id)
              print(f"Validation loss at {step}: {valid_loss.item()}")

        # Calculate the loss and mask out loss on kinematic particles
        loss = acceleration_loss(pred_acc, target_acc, non_kinematic_mask)
        # loss = position_centered_loss(pred_acc, target_acc, pred_pos, particle_type, non_kinematic_mask)
        # loss = position_centered_loss(pred_acc, target_acc, pred_pos, labels, particle_type, non_kinematic_mask, pos_weight, centroid_weight)

        train_loss = loss.item()
        epoch_train_loss += train_loss

        # Computes the gradient of loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Update learning rate (exponential decay, scaled by world_size)
        lr_new = flags["lr_init"] * (flags["lr_decay"] ** (step/flags["lr_decay_steps"])) * world_size
        for param in optimizer.param_groups:
          param['lr'] = lr_new

        print(f'rank = {rank}, epoch = {epoch}, step = {step}/{flags["ntraining_steps"]}, loss = {train_loss}', flush=True)

        # Save model state
        if rank == 0 or device == torch.device("cpu"):
          if step % flags["nsave_steps"] == 0:
            save_model_and_train_state(rank, device, simulator, flags, step, epoch, 
                                       optimizer, train_loss, valid_loss, train_loss_hist, valid_loss_hist)

        step += 1
        if step >= flags["ntraining_steps"]:
            break

      # Epoch level statistics
      # Training loss at epoch (averaged across ranks on GPU)
      epoch_train_loss /= steps_per_epoch
      epoch_train_loss = torch.tensor([epoch_train_loss]).to(device_id)
      if device == torch.device("cuda"):
        torch.distributed.reduce(epoch_train_loss, dst=0, op=torch.distributed.ReduceOp.SUM)
        epoch_train_loss /= world_size

      train_loss_hist.append((epoch, epoch_train_loss.item()))

      # Validation loss at epoch
      if flags["validation_interval"] is not None:
        sampled_valid_example = next(iter(dl_valid))
        epoch_valid_loss = validation(
                simulator, sampled_valid_example, n_features, flags, rank, device_id)
        if device == torch.device("cuda"):
          torch.distributed.reduce(epoch_valid_loss, dst=0, op=torch.distributed.ReduceOp.SUM)
          epoch_valid_loss /= world_size

        valid_loss_hist.append((epoch, epoch_valid_loss.item()))

      # Print epoch statistics
      if rank == 0 or device == torch.device("cpu"):
        print(f'Epoch {epoch}, training loss: {epoch_train_loss.item()}')
        if flags["validation_interval"] is not None:
          print(f'Epoch {epoch}, validation loss: {epoch_valid_loss.item()}')
      
      # Reset epoch training loss
      epoch_train_loss = 0
      # Only count a full pass over the data as an epoch.
      if steps_per_epoch >= len(dl):
        epoch += 1
      steps_per_epoch = 0
      
      if step >= flags["ntraining_steps"]:
        break 
      
  except KeyboardInterrupt:
    pass

  # Save model state on keyboard interrupt (and at normal completion)
  save_model_and_train_state(rank, device, simulator, flags, step, epoch, optimizer, train_loss, valid_loss, train_loss_hist, valid_loss_hist)

  if torch.cuda.is_available():
    distribute.cleanup()


def _get_simulator(
        metadata: dict,
        acc_noise_std: float,
        vel_noise_std: float,
        device: torch.device) -> learned_simulator.LearnedSimulator:
  """Instantiates the simulator.

  Args:
    metadata: Dict with dataset metadata (means/stds, bounds, dim, ...).
    acc_noise_std: Acceleration noise std deviation.
    vel_noise_std: Velocity noise std deviation.
    device: PyTorch device 'cpu' or 'cuda'.

  Returns:
    A configured LearnedSimulator instance (not yet moved to `device`
    as a whole; only the normalization stats are placed there).
  """

  # Normalization stats: the injected noise std is folded into the dataset
  # std in quadrature.
  normalization_stats = {
      'acceleration': {
          'mean': torch.FloatTensor(metadata['acc_mean']).to(device),
          'std': torch.sqrt(torch.FloatTensor(metadata['acc_std'])**2 +
                            acc_noise_std**2).to(device),
      },
      'velocity': {
          'mean': torch.FloatTensor(metadata['vel_mean']).to(device),
          'std': torch.sqrt(torch.FloatTensor(metadata['vel_std'])**2 +
                            vel_noise_std**2).to(device),
      },
  }

  # Get necessary parameters for loading simulator.
  if "nnode_in" in metadata and "nedge_in" in metadata:
    nnode_in = metadata['nnode_in']
    nedge_in = metadata['nedge_in']
  else:
    # Given that there is no additional node feature (e.g., material_property) except for:
    # (position (dim), velocity (dim*6), particle_type (16)),
    nnode_in = 45 if metadata['dim'] == 3 else 30
    nedge_in = metadata['dim'] + 1

  # Init simulator.
  simulator = learned_simulator.LearnedSimulator(
      particle_dimensions=metadata['dim'],
      nnode_in=nnode_in,
      nedge_in=nedge_in,
      latent_dim=128,
      nmessage_passing_steps=10,
      nmlp_layers=2,
      mlp_hidden_dim=128,
      connectivity_radius=metadata['default_connectivity_radius'],
      boundaries=np.array(metadata['bounds']),
      normalization_stats=normalization_stats,
      nparticle_types=NUM_PARTICLE_TYPES,
      particle_type_embedding_size=16,
      boundary_clamp_limit=metadata["boundary_augment"] if "boundary_augment" in metadata else 1.0,
      #stl_path="G:\\EDEM_Data\\new_location\\Drum_static.stl",
      device=device)

  return simulator

def validation(
        simulator,
        example,
        n_features,
        flags,
        rank,
        device_id):
  """Compute the masked acceleration loss for a single sampled batch.

  Args:
    simulator: Learned simulator (DDP-wrapped when `device_id` is an int rank).
    example: ((position, particle_type, [material_property,] n_particles_per_example), labels)
      as yielded by the training/validation data loaders.
    n_features: Number of input features per sample — 3 when
      material_property is included, 2 otherwise (same convention as the
      `n_features` computed in `train`).
    flags: Dict of runtime flags (uses "noise_std").
    rank: Local rank when running distributed on GPUs.
    device_id: Torch device (CPU run) or integer GPU rank.

  Returns:
    Scalar tensor with the masked acceleration loss.
  """
  position = example[0][0].to(device_id)
  particle_types = example[0][1].to(device_id)
  # Bug fix: the data loaders yield 3 features (positions, particle_type,
  # material_property) or 2 (positions, particle_type) — see `train`, which
  # passes that same count here. The previous 4/3 thresholds never matched
  # the 3-feature case, so material batches were mis-parsed.
  if n_features == 3:  # if dl includes material_property
    material_property = example[0][2].to(device_id)
    n_particles_per_example = example[0][3].to(device_id)
  elif n_features == 2:
    material_property = None
    n_particles_per_example = example[0][2].to(device_id)
  else:
    raise NotImplementedError
  labels = example[1].to(device_id)

  # Sample the noise to add to the inputs (zeroed on kinematic particles).
  sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence(
    position, noise_std_last_step=flags["noise_std"]).to(device_id)
  non_kinematic_mask = (particle_types != KINEMATIC_PARTICLE_ID).clone().detach().to(device_id)
  sampled_noise *= non_kinematic_mask.view(-1, 1, 1)

  # Under DDP (integer device_id) the prediction method lives on `.module`.
  device_or_rank = rank if isinstance(device_id, int) else device_id
  predict_accelerations = simulator.module.predict_accelerations if isinstance(device_id, int) else simulator.predict_accelerations
  # Get the predictions and target accelerations.
  # (The unused predict_positions forward pass was removed — its result was
  # only consumed by the commented-out position-centered losses.)
  with torch.no_grad():
      pred_acc, target_acc = predict_accelerations(
          next_positions=labels.to(device_or_rank),
          position_sequence_noise=sampled_noise.to(device_or_rank),
          position_sequence=position.to(device_or_rank),
          nparticles_per_example=n_particles_per_example.to(device_or_rank),
          particle_types=particle_types.to(device_or_rank),
          material_property=material_property.to(device_or_rank) if material_property is not None else None
      )

  # Compute loss, masked out on kinematic particles
  loss = acceleration_loss(pred_acc, target_acc, non_kinematic_mask)
  # loss = position_centered_loss(pred_acc, target_acc, pred_pos, particle_types, non_kinematic_mask)
  # loss = position_centered_loss(pred_acc, target_acc, pred_pos, labels, particle_types, non_kinematic_mask, pos_weight, centroid_weight)

  return loss


def main(_):
  """Train or evaluates the model.

  Dispatches on FLAGS.mode: 'train' spawns (multi-)GPU or CPU training;
  'valid'/'rollout' run evaluation via `predict`.
  """
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  # Rendezvous address/port for torch.distributed process-group init.
  if device == torch.device('cuda'):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29500"

  myflags = reading_utils.flags_to_dict(FLAGS)

  if FLAGS.mode == 'train':
    # If model_path does not exist create new directory.
    if not os.path.exists(FLAGS.model_path):
      os.makedirs(FLAGS.model_path)

    # Train on gpu 
    if device == torch.device('cuda'):
      available_gpus = torch.cuda.device_count()
      print(f"Available GPUs = {available_gpus}")

      # Set the number of GPUs based on availability and the specified number
      if FLAGS.n_gpus is None or FLAGS.n_gpus > available_gpus:
        world_size = available_gpus
        if FLAGS.n_gpus is not None:
          print(f"Warning: The number of GPUs specified ({FLAGS.n_gpus}) exceeds the available GPUs ({available_gpus})")
      else:
        world_size = FLAGS.n_gpus

      # Print the status of GPU usage
      print(f"Using {world_size}/{available_gpus} GPUs")

      # Spawn training to GPUs (one process per GPU)
      distribute.spawn_train(train, myflags, world_size, device)

    # Train on cpu  
    else:
      rank = None
      world_size = 1
      train(rank, myflags, world_size, device)

  elif FLAGS.mode in ['valid', 'rollout']:
    # Set device: evaluation runs single-device, on the requested GPU if any.
    world_size = torch.cuda.device_count()
    if FLAGS.cuda_device_number is not None and torch.cuda.is_available():
      device = torch.device(f'cuda:{int(FLAGS.cuda_device_number)}')
    # debug output
    print(f"device is {device} world size is {world_size}")
    predict(device)


if __name__ == '__main__':
  app.run(main)
