#include "ops/attention.h"
#include "ops/concat.h"
#include "ops/conv_2d.h"
#include "ops/element_binary.h"
#include "ops/element_unary.h"
#include "ops/embedding.h"
#include "ops/flat.h"
#include "ops/linear.h"
#include "ops/noop.h"
#include "ops/pool_2d.h"
#include "ops/softmax.h"
#include "ops/split.h"
#include "tensor.h"
#include "utils/hash-utils.h"
#include <queue>

namespace FlexFlow {

using namespace Legion;

bool ParallelTensorShape::operator==(ParallelTensorShape const &other) const {
  // Two shapes are equal when their rank, element type, and every
  // per-dimension (size, degree) pair agree. parallel_idx is not
  // compared: it is derived from the degrees (see update_parallel_ids).
  if (this->num_dims != other.num_dims || this->data_type != other.data_type) {
    return false;
  }

  for (int d = 0; d < this->num_dims; d++) {
    bool same_dim = (this->dims[d].size == other.dims[d].size) &&
                    (this->dims[d].degree == other.dims[d].degree);
    if (!same_dim) {
      return false;
    }
  }

  return true;
}

size_t ParallelTensorShape::get_piece_size() const {
  // Bytes occupied by a single piece of the tensor: the element size
  // times the per-piece extent (size / degree) along every dimension.
  size_t bytes = data_type_size(this->data_type);
  for (int d = 0; d < this->num_dims; d++) {
    bytes *= this->dims[d].size / this->dims[d].degree;
  }
  return bytes;
}

RecordFormatter ParallelTensorShape::as_dot() const {
  // Render each dimension as a "size/degree" field for DOT output.
  RecordFormatter formatter;
  for (int d = 0; d < this->num_dims; d++) {
    std::ostringstream field;
    field << this->dims[d].size << "/" << this->dims[d].degree;
    formatter << field.str();
  }
  return formatter;
}

bool ParallelTensorBase::update_parallel_ids(int numdim, ParallelDim *dims) {
  int next_parallel_idx = 0;
  for (int i = 0; i < numdim; i++) {
    if (dims[i].degree == 1) {
      dims[i].parallel_idx = -1;
    } else {
      dims[i].parallel_idx = next_parallel_idx;
      next_parallel_idx++;
    }
  }

  return true;
}

void ParallelTensorBase::inline_map(FFConfig &config) {
  // Inline-maps this tensor's logical region into the calling task so
  // its data can be accessed directly; blocks until the mapped
  // physical region is valid. Pair with inline_unmap().
  printf("inline map tensor\n");
  Context ctx = config.lg_ctx;
  Runtime *runtime = config.lg_hlr;

  // Request read-write, exclusive access to the single data field.
  RegionRequirement region_req(region, READ_WRITE, EXCLUSIVE, region);
  region_req.add_field(FID_DATA);
  InlineLauncher inline_launcher(region_req);
  physical_region = runtime->map_region(ctx, inline_launcher);
  physical_region.wait_until_valid();
}

void ParallelTensorBase::inline_unmap(FFConfig &config) {
  // Releases the inline mapping created by inline_map(). Asserts that
  // a valid physical region exists (i.e. inline_map() was called).
  printf("inline unmap tensor\n");
  Context ctx = config.lg_ctx;
  Runtime *runtime = config.lg_hlr;
  assert(physical_region.is_valid() == true);
  runtime->unmap_region(ctx, physical_region);
}

template <typename T>
T *ParallelTensorBase::get_raw_ptr(FFConfig &config) {
  // Returns a raw, writable pointer into this tensor's already-mapped
  // physical region. Supports ranks 1 through 5; any other rank prints
  // a diagnostic and aborts via assert.
  Context ctx = config.lg_ctx;
  Runtime *runtime = config.lg_hlr;
  RegionRequirement req(region, READ_WRITE, EXCLUSIVE, region);
  req.add_field(FID_DATA);
  T *result = NULL;
  // TensorAccessorW needs the rank as a compile-time constant, so
  // dispatch on num_dims with one case per supported rank.
  switch (num_dims) {
#define DIMFUNC(DIM)                                                           \
  case DIM: {                                                                  \
    TensorAccessorW<T, DIM> acc(                                               \
        physical_region, req, FID_DATA, ctx, runtime, true);                   \
    result = (T *)acc.ptr;                                                     \
    break;                                                                     \
  }
    DIMFUNC(1)
    DIMFUNC(2)
    DIMFUNC(3)
    DIMFUNC(4)
    DIMFUNC(5)
#undef DIMFUNC
    default:
      printf("wrong num_dims %d", num_dims);
      assert(0);
  }
  return result;
}

void ParallelTensorBase::attach_raw_ptr(FFConfig &config,
                                        void *raw_ptr,
                                        bool column_major) {
  // Binds an externally-allocated buffer to this tensor's region as a
  // Legion external instance (attach_array_soa layout). The buffer
  // must remain alive until detach_raw_ptr() is called.
  Context ctx = config.lg_ctx;
  Runtime *runtime = config.lg_hlr;
  AttachLauncher launcher(EXTERNAL_INSTANCE, region, region);
  std::vector<FieldID> fields(1, FID_DATA);
  // Choose a system-memory instance with affinity to the executing
  // processor to host the external buffer.
  const Memory local_sysmem =
      Machine::MemoryQuery(Machine::get_machine())
          .has_affinity_to(runtime->get_executing_processor(ctx))
          .only_kind(Memory::SYSTEM_MEM)
          .first();
  launcher.attach_array_soa(raw_ptr, column_major, fields, local_sysmem);
  physical_region = runtime->attach_external_resource(ctx, launcher);
}

void ParallelTensorBase::detach_raw_ptr(FFConfig &config) {
  // Releases the external-memory binding created by attach_raw_ptr().
  Runtime *rt = config.lg_hlr;
  rt->detach_external_resource(config.lg_ctx, physical_region);
}

template <typename T>
bool ParallelTensorBase::get_input_sub_tensor_via_mappings(
    ParallelConfig const &pc, ParallelTensorBase &tensor) const {
  // Derives the per-device input sub-tensor shape from operator type
  // T's input->output dimension mappings. Returns false (with a
  // diagnostic) when pc's rank does not match this tensor's rank.
  if (pc.nDims != num_dims) {
    printf("Could not get input subtensor because the number of dimensions do "
           "not match: %d != %d\n",
           pc.nDims,
           num_dims);
    return false;
  }
  std::vector<ParallelDimMappingRecord> mapping;
  T::construct_output_mappings(mapping);
  std::unordered_map<int, int> dim_mapping = input_to_output_mapping(mapping);

  // Fix: populate rank and data type the way every other branch of
  // get_input_sub_tensor does; previously OP_CONV2D/OP_POOL2D callers
  // received a tensor whose num_dims and data_type were never set.
  tensor.num_dims = this->num_dims;
  tensor.data_type = this->data_type;
  for (int i = 0; i < this->num_dims; i++) {
    // pc's degree along the mapped output dimension must match this
    // input dimension's partitioning degree.
    assert(pc.dim[dim_mapping.at(i)] == dims[i].degree);
    tensor.dims[i].size = dims[i].size / dims[i].degree;
  }

  return true;
}

bool ParallelTensorBase::get_sub_tensor(MachineView const &mv,
                                        ParallelTensorBase &sub_tensor) const {
  // Computes the per-device view of this tensor under machine view mv:
  // each partitioned dimension's size and degree are divided by the
  // corresponding machine-view degree; replicated dims copy through.
  sub_tensor.num_dims = this->num_dims;
  for (int d = 0; d < this->num_dims; d++) {
    sub_tensor.dims[d] = this->dims[d];
    int pidx = this->dims[d].parallel_idx;
    if (pidx == -1) {
      continue; // not partitioned along this dimension
    }
    assert(pidx >= 0);
    assert(this->dims[d].degree == mv.dim[pidx]);
    sub_tensor.dims[d].size /= mv.dim[pidx];
    sub_tensor.dims[d].degree /= mv.dim[pidx];
  }
  return true;
}

bool ParallelTensorBase::get_input_sub_tensor(ParallelConfig const &pc,
                                              ParallelTensorBase &tensor,
                                              OperatorType type) {
  // Computes the per-device input sub-tensor shape under parallel
  // config `pc` for an operator of the given type, writing the result
  // into `tensor`. Returns false (after printing a diagnostic) when pc
  // is incompatible with this tensor's shape.
  // TODO: consider reduction dim for conv2d and linear
  switch (type) {
    case OP_FLAT: {
      assert(pc.nDims == 3 &&
             "Invalid dimension for parallel config of OP_FLAT");

      // NOTE(review): this branch divides tensor.dims in place, so it
      // assumes the caller pre-populated `tensor` with this tensor's
      // dims — confirm against call sites.
      tensor.num_dims = this->num_dims;
      for (int i = 0; i < 3; i++) {
        assert(tensor.dims[i].size % pc.dim[i] == 0);
        tensor.dims[i].size = tensor.dims[i].size / pc.dim[i];
      }
      break;
    }
    case OP_RESHAPE: {
      // Reshape assumes pure data parallelism: every pc dimension
      // except the last must have degree 1.
      for (int i = 0; i < pc.nDims - 1; i++) {
        assert(pc.dim[i] == 1 && "Assuming data parallel for RESHAPE");
      }
      int batchDim = pc.dim[pc.nDims - 1];
      if (dims[num_dims - 1].size % batchDim != 0) {
        printf("Could not get input subtensor because the dimension is not "
               "divisiable: %d %% %d != 0\n",
               dims[num_dims - 1].size,
               batchDim);
        // Fix: previously execution fell through after printing this
        // error and silently truncated the last dimension via integer
        // division; every other branch returns false here.
        return false;
      }
      tensor.num_dims = num_dims;
      for (int i = num_dims - 2; i >= 0; i--) {
        tensor.dims[i].size = dims[i].size;
      }
      tensor.dims[num_dims - 1].size = dims[num_dims - 1].size / batchDim;
      break;
    }
    case OP_LINEAR: {
      if (pc.nDims != num_dims) {
        printf("Could not get input subtensor because the number of dimensions "
               "do not match: %d != %d\n",
               pc.nDims,
               num_dims);
        return false;
      }
      tensor.num_dims = num_dims;
      for (int i = 0; i < num_dims; i++) {
        if (dims[i].size % pc.dim[i] != 0) {
          printf("Could not get input subtensor because the given dimension is "
                 "not divisible: %d %% %d != 0\n",
                 dims[i].size,
                 pc.dim[i]);
          return false;
        }
        tensor.dims[i].size = dims[i].size / pc.dim[i];
      }
      // Dim 0 keeps its full size, undoing the division above —
      // presumably the reduction dimension for linear (see the TODO at
      // the top of this function).
      tensor.dims[0].size = dims[0].size;
      tensor.data_type = data_type;
      break;
    }
    case OP_CONV2D:
      // Conv2D/Pool2D derive their sub-tensor via the op's declared
      // input->output dimension mappings.
      if (!this->get_input_sub_tensor_via_mappings<Conv2D>(pc, tensor)) {
        return false;
      }
      break;
    case OP_POOL2D:
      if (!this->get_input_sub_tensor_via_mappings<Pool2D>(pc, tensor)) {
        return false;
      }
      break;
    default: {
      // Generic case: validate all dimensions first, then divide each
      // by its assigned degree.
      if (pc.nDims != num_dims) {
        printf("Could not get input subtensor because the number of dimensions "
               "do not match: %d != %d\n",
               pc.nDims,
               num_dims);
        return false;
      }
      for (int i = 0; i < num_dims; i++) {
        if (dims[i].size % pc.dim[i] != 0) {
          printf("Could not get input subtensor because the given dimension is "
                 "not divisible: %d %% %d != 0\n",
                 dims[i].size,
                 pc.dim[i]);
          return false;
        }
      }
      tensor.num_dims = num_dims;
      for (int i = 0; i < num_dims; i++) {
        tensor.dims[i].size = dims[i].size / pc.dim[i];
      }
      tensor.data_type = data_type;
    } break;
  }
  return true;
}

bool ParallelTensorBase::get_output_sub_tensor(ParallelConfig const &pc,
                                               ParallelTensorBase &tensor,
                                               OperatorType type) {
  // Computes the per-device output sub-tensor under parallel config
  // pc: every dimension is divided by its assigned degree. Returns
  // false (with a diagnostic) on a rank mismatch or a non-divisible
  // dimension; `tensor` is only written once validation passes.
  if (pc.nDims != num_dims) {
    printf("Could not get output subtensor because the number of dimensions do "
           "not match: %d != %d\n",
           pc.nDims,
           num_dims);
    return false;
  }
  // Validate divisibility for every dimension before mutating tensor.
  for (int d = 0; d < num_dims; d++) {
    if (dims[d].size % pc.dim[d] != 0) {
      printf("Could not get output subtensor because the given dimension is "
             "not divisible: %d %% %d != 0\n",
             dims[d].size,
             pc.dim[d]);
      return false;
    }
  }
  tensor.num_dims = num_dims;
  for (int d = 0; d < num_dims; d++) {
    tensor.dims[d].size = dims[d].size / pc.dim[d];
  }
  tensor.data_type = data_type;
  return true;
}

Domain ParallelTensorBase::get_domain() const {
  // Builds the Legion Domain covering this tensor: [0, size-1] along
  // each dimension, stored as all lo values followed by all hi values.
  Domain dom;
  dom.dim = this->num_dims;
  for (int d = 0; d < this->num_dims; d++) {
    dom.rect_data[d] = 0;
    dom.rect_data[d + dom.dim] = this->dims[d].size - 1;
  }
  return dom;
}

bool ParallelTensorBase::check_valid() const {
  // Validates this tensor's parallel metadata:
  //  - every dim has a non-negative size divisible by its degree,
  //  - parallel_idx values are in range and unique,
  //  - the assigned parallel indices form a contiguous prefix 0..k-1.
  bool used[MAX_TENSOR_DIM];
  for (int i = 0; i < MAX_TENSOR_DIM; i++) {
    used[i] = false;
  }
  for (int i = 0; i < num_dims; i++) {
    if (dims[i].size < 0) {
      return false;
    }
    if (dims[i].size % dims[i].degree != 0) {
      return false;
    }
    // Fix: was `> MAX_TENSOR_DIM`, which let parallel_idx ==
    // MAX_TENSOR_DIM slip through and index used[] out of bounds.
    if (dims[i].parallel_idx >= MAX_TENSOR_DIM) {
      return false;
    }
    assert(dims[i].parallel_idx >= -1);
    assert(dims[i].degree >= 1);
    if (dims[i].parallel_idx >= 0) {
      if (used[dims[i].parallel_idx]) {
        return false; // duplicate parallel index
      }
      used[dims[i].parallel_idx] = true;
    }
  }
  assert(this->data_type != DT_NONE);
  // Skip the used prefix (bounded so a fully-used array cannot read
  // past the end), then reject any used slot after the first gap.
  int idx = 0;
  while (idx < MAX_TENSOR_DIM && used[idx]) {
    idx++;
  }
  for (int i = idx; i < MAX_TENSOR_DIM; i++) {
    if (used[i]) {
      return false;
    }
  }
  return true;
}

namespace FlexFlow {

ParallelTensorShape ParallelTensorBase::get_shape() const {
  // Snapshot this tensor's rank, element type, and per-dimension
  // metadata into a standalone shape value.
  ParallelTensorShape result;
  result.num_dims = this->num_dims;
  result.data_type = this->data_type;
  for (int d = 0; d < this->num_dims; d++) {
    result.dims[d] = this->dims[d];
  }
  return result;
}

bool ParallelTensorBase::is_valid_machine_view(MachineView const &view) const {
  // A machine view is valid for this tensor when every partitioned
  // dimension maps to an in-range view dimension with a matching
  // degree, the view's rank equals the number of partitioned dims
  // (counted as at least 1), and total part counts agree.
  int num_parallel_dims = 0;
  for (int i = 0; i < num_dims; i++) {
    if (dims[i].parallel_idx != -1) {
      num_parallel_dims++;
      // Fix: was `>`, which allowed parallel_idx == view.ndims through
      // to an out-of-bounds read of view.dim below.
      if (dims[i].parallel_idx >= view.ndims) {
        return false;
      }
      if (view.dim[dims[i].parallel_idx] != dims[i].degree) {
        return false;
      }
    }
  }
  // A fully-replicated tensor still occupies one view dimension.
  if (num_parallel_dims == 0) {
    num_parallel_dims = 1;
  }
  if (num_parallel_dims != view.ndims) {
    return false;
  }
  if (get_total_num_parts() != view.num_parts()) {
    return false;
  }
  return true;
}

template <typename T>
bool ParallelTensorBase::set_tensor(FFModel const *ff,
                                    std::vector<int> const &dim_sizes,
                                    T const *data) {
  // Copies `data` (volume = product of dim_sizes) into this tensor's
  // region via an inline mapping, replicating the payload once per
  // replica under NCCL parameter synchronization. Returns true on
  // success; asserts on an unsupported rank.
  Context ctx = ff->config.lg_ctx;
  Runtime *runtime = ff->config.lg_hlr;
  // TODO: check data type matches
  // TODO: Currently we use a task launch, change to index launch for NCCL
  // parameter
  size_t volume = 1, num_replicas = 0;
  if (sync_type == ParameterSyncType::NCCL) {
    // One replica per point in the parallel index space.
    Domain domain = runtime->get_index_space_domain(ctx, parallel_is);
    num_replicas = domain.get_volume();
  } else if (sync_type == ParameterSyncType::PS) {
    num_replicas = 1;
  } else {
    num_replicas = 1;
  }
  // Total element count of one copy of the payload.
  for (size_t i = 0; i < dim_sizes.size(); i++) {
    volume = volume * dim_sizes[i];
  }
  // Inline-map the region read-write, then memcpy the payload in once
  // per replica; the accessor's volume must equal volume * replicas.
  RegionRequirement req(region, READ_WRITE, EXCLUSIVE, region);
  req.add_field(FID_DATA);
  InlineLauncher launcher(req);
  PhysicalRegion pr = runtime->map_region(ctx, launcher);
  pr.wait_until_valid();
  switch (num_dims) {
#define DIMFUNC(DIM)                                                           \
  case DIM: {                                                                  \
    TensorAccessorW<T, DIM> acc(pr, req, FID_DATA, ctx, runtime, true);        \
    assert(acc.rect.volume() == volume * num_replicas);                        \
    T *ptr = acc.ptr;                                                          \
    for (size_t i = 0; i < num_replicas; i++) {                                \
      memcpy(ptr, data, volume * sizeof(T));                                   \
      ptr += volume;                                                           \
    }                                                                          \
    break;                                                                     \
  }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      // Unsupported dim
      assert(false);
  }
  runtime->unmap_region(ctx, pr);
  return true;
}

template <typename T>
bool ParallelTensorBase::get_tensor(FFModel const *ff,
                                    T *data,
                                    bool get_gradients) {
  // Copies one piece of this tensor (or of its gradient, when
  // get_gradients is true) out of the region into `data`. For
  // PS-synchronized parameters the whole region is read; otherwise the
  // subregion at the zero point of the owner op's partition is read.
  Context ctx = ff->config.lg_ctx;
  Runtime *runtime = ff->config.lg_hlr;
  LogicalRegion weight_lr = LogicalRegion::NO_REGION;
  if (sync_type == ParameterSyncType::PS) {
    weight_lr = get_gradients ? region_grad : region;
  } else {
    assert(owner_op != NULL);
    Domain domain = runtime->get_index_space_domain(ctx, parallel_is);
    switch (domain.get_dim()) {
#define DIMFUNC(DIM)                                                           \
  case DIM: {                                                                  \
    DomainPoint point = Point<DIM>::ZEROES();                                  \
    weight_lr = runtime->get_logical_subregion_by_color(                       \
        ctx, get_gradients ? part_grad : part, point);                         \
    break;                                                                     \
  }
      LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    }
    // NOTE(review): the switch above has no default case — an
    // unsupported rank would leave weight_lr as NO_REGION; confirm
    // LEGION_FOREACH_N covers every possible domain rank here.
  }
  // TODO: check data type matches
  // Volume of a single piece: size/degree along each dimension.
  size_t volume = 1;
  for (int i = 0; i < num_dims; i++) {
    volume = volume * dims[i].size / dims[i].degree;
  }
  // Inline-map the selected (sub)region read-only and copy it out.
  RegionRequirement req(
      weight_lr, READ_ONLY, EXCLUSIVE, get_gradients ? region_grad : region);
  req.add_field(FID_DATA);
  InlineLauncher launcher(req);
  PhysicalRegion pr = runtime->map_region(ctx, launcher);
  pr.wait_until_valid();
  switch (num_dims) {
#define DIMFUNC(DIM)                                                           \
  case DIM: {                                                                  \
    TensorAccessorR<T, DIM> acc(pr, req, FID_DATA, ctx, runtime);              \
    assert(acc.rect.volume() == volume);                                       \
    memcpy(data, acc.ptr, volume * sizeof(T));                                 \
    break;                                                                     \
  }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      // Unsupported dim
      assert(false);
  }
  runtime->unmap_region(ctx, pr);
  return true;
}

// Explicit template instantiations so the definitions above are
// emitted in this translation unit for every element type used by
// callers elsewhere in the project.

// get_raw_ptr: raw-pointer access for float and int32 tensors.
template float *ParallelTensorBase::get_raw_ptr<float>(FFConfig &config);
template int32_t *ParallelTensorBase::get_raw_ptr<int32_t>(FFConfig &config);

// TensorBase set/get instantiations (TensorBase is declared elsewhere;
// its definitions are not in this file).
template bool TensorBase::set_tensor<float>(FFModel const *ff,
                                            std::vector<int> const &dims,
                                            float const *data);
template bool TensorBase::get_tensor<float>(FFModel const *ff,
                                            float *data,
                                            bool get_gradients);
template bool TensorBase::set_tensor<double>(FFModel const *ff,
                                             std::vector<int> const &dims,
                                             double const *data);
template bool TensorBase::get_tensor<double>(FFModel const *ff,
                                             double *data,
                                             bool get_gradients);
template bool TensorBase::set_tensor<int32_t>(FFModel const *ff,
                                              std::vector<int> const &dims,
                                              int32_t const *data);
template bool TensorBase::get_tensor<int32_t>(FFModel const *ff,
                                              int32_t *data,
                                              bool get_gradients);
template bool TensorBase::set_tensor<int64_t>(FFModel const *ff,
                                              std::vector<int> const &dims,
                                              int64_t const *data);
template bool TensorBase::get_tensor<int64_t>(FFModel const *ff,
                                              int64_t *data,
                                              bool get_gradients);

// ParallelTensorBase set/get instantiations for the same element types.
template bool ParallelTensorBase::set_tensor<float>(
    FFModel const *ff, std::vector<int> const &dims, float const *data);
template bool ParallelTensorBase::get_tensor<float>(FFModel const *ff,
                                                    float *data,
                                                    bool get_gradients);
template bool ParallelTensorBase::set_tensor<double>(
    FFModel const *ff, std::vector<int> const &dims, double const *data);
template bool ParallelTensorBase::get_tensor<double>(FFModel const *ff,
                                                     double *data,
                                                     bool get_gradients);
template bool ParallelTensorBase::set_tensor<int32_t>(
    FFModel const *ff, std::vector<int> const &dims, int32_t const *data);
template bool ParallelTensorBase::get_tensor<int32_t>(FFModel const *ff,
                                                      int32_t *data,
                                                      bool get_gradients);
template bool ParallelTensorBase::set_tensor<int64_t>(
    FFModel const *ff, std::vector<int> const &dims, int64_t const *data);
template bool ParallelTensorBase::get_tensor<int64_t>(FFModel const *ff,
                                                      int64_t *data,
                                                      bool get_gradients);

template bool TensorBase::get_output_parallel_tensor<float>(FFModel const *ff,
                                                            float *data,
                                                            bool get_gradients);

}; // namespace FlexFlow
