/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <iostream>
#include <iterator>

#include "glog/logging.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
#include "paddle/phi/core/distributed/auto_parallel/proto_helper.h"

namespace phi::distributed {
using phi::distributed::auto_parallel::str_join;
using phi::distributed::auto_parallel::TensorDistAttrProto;

// "partial" is intentionally not annotatable by users for now.
// Allow-list of attribute names a user may annotate; consulted by
// mark_annotated() and verify_annotated().
std::vector<std::string> TensorDistAttr::fields_{
    "process_mesh", "dims_mapping", "batch_dim", "chunk_id", "dynamic_dims"};

// Build a default dist attr for a tensor of the given shape: every dim
// replicated and every dim marked non-dynamic.
TensorDistAttr::TensorDistAttr(const std::vector<int64_t>& tensor_shape) {
  set_default_dims_mapping(tensor_shape);
  set_default_dynamic_dims(tensor_shape);
}

// Copy construction delegates to copy_from() so the field-copy logic
// lives in exactly one place.
TensorDistAttr::TensorDistAttr(const TensorDistAttr& dist_attr) {
  copy_from(dist_attr);
}

// Copy-and-swap assignment: make a temporary copy, then swap every member
// into *this. The self-assignment guard skips the needless copy.
TensorDistAttr& TensorDistAttr::operator=(const TensorDistAttr& dist_attr) {
  if (this == &dist_attr) return *this;
  TensorDistAttr tmp(dist_attr);
  std::swap(this->process_mesh_, tmp.process_mesh_);
  std::swap(this->dims_mapping_, tmp.dims_mapping_);
  std::swap(this->batch_dim_, tmp.batch_dim_);
  std::swap(this->chunk_id_, tmp.chunk_id_);
  std::swap(this->dynamic_dims_, tmp.dynamic_dims_);
  std::swap(this->annotated_, tmp.annotated_);
  std::swap(this->partial_status_, tmp.partial_status_);
  std::swap(this->skip_check_mesh_, tmp.skip_check_mesh_);
  std::swap(this->split_factor_map_, tmp.split_factor_map_);
  return *this;
}

// Deep-copy every field of dist_attr into *this. Uses the public setters so
// their side effects apply (e.g. set_dims_mapping may reset dynamic dims,
// which set_dynamic_dims then overwrites below); split_factor_map_ and
// skip_check_mesh_ are assigned directly.
void TensorDistAttr::copy_from(const TensorDistAttr& dist_attr) {
  set_process_mesh(dist_attr.process_mesh());
  set_dims_mapping(dist_attr.multi_dims_mapping());
  split_factor_map_ = dist_attr.split_factor();
  set_batch_dim(dist_attr.batch_dim());
  set_chunk_id(dist_attr.chunk_id());
  set_dynamic_dims(dist_attr.dynamic_dims());
  set_annotated(dist_attr.annotated());
  set_partial_status(dist_attr.partial_status());
  skip_check_mesh_ = dist_attr.skip_check_mesh();
}

// Replace the process mesh this tensor is distributed over.
void TensorDistAttr::set_process_mesh(const ProcessMesh& process_mesh) {
  process_mesh_ = process_mesh;
}

// Set the dims mapping from its flattened 1-D form: one mesh dim per
// tensor dim, -1 meaning replicated (see DimMapProxy::sync_2d_map). The
// proxy keeps the canonical 2-D representation in sync.
void TensorDistAttr::set_dims_mapping(
    const std::vector<int64_t>& dims_mapping) {
  // dynamic_dims_ and dims_mapping may be inconsistent in length; reset
  // dynamic_dims_ to all-static defaults when either side is empty.
  dims_mapping_proxy = dims_mapping;
  if (dynamic_dims_.empty() || dims_mapping.empty()) {
    set_default_dynamic_dims(dims_mapping);
  }
}

// Set the dims mapping from its canonical 2-D form: for each tensor dim,
// the list of mesh dims it is sharded on (an empty list == replicated).
void TensorDistAttr::set_dims_mapping(
    const std::vector<std::vector<int64_t>>& dims_mapping) {
  dims_mapping_proxy = dims_mapping;
  // dynamic_dims_ and dims_mapping may be inconsistent in length; reset
  // dynamic_dims_ to all-static defaults when either side is empty.
  if (dynamic_dims_.empty() || dims_mapping.empty()) {
    // Call the size-based overload directly instead of materializing a
    // throwaway std::vector<int64_t> whose only purpose was its size.
    set_default_dynamic_dims(static_cast<int64_t>(dims_mapping.size()));
  }
}

// Set the tensor dim treated as the batch dim. May be negative
// (Python-style); normalized in verify_batch_dim().
void TensorDistAttr::set_batch_dim(int64_t batch_dim) {
  batch_dim_ = batch_dim;
}

// Set the chunk id of this tensor.
void TensorDistAttr::set_chunk_id(const int64_t& chunk_id) {
  chunk_id_ = chunk_id;
}

// Replace the per-tensor-dim dynamic flags (length is validated against
// the tensor rank in verify_dynamic_dims).
void TensorDistAttr::set_dynamic_dims(const std::vector<bool>& dynamic_dims) {
  dynamic_dims_ = dynamic_dims;
}

// Replace the user-annotation map (field name -> annotated flag).
void TensorDistAttr::set_annotated(
    const std::map<std::string, bool>& annotated) {
  annotated_ = annotated;
}

// Return the split factor recorded for `mesh_dim`.
// Throws InvalidArgument when mesh_dim is not smaller than the process
// mesh rank. NOTE(review): a negative mesh_dim passes this check and is
// forwarded to the map as-is — confirm that is intended.
int64_t TensorDistAttr::get_split_factor(int64_t mesh_dim) const {
  PADDLE_ENFORCE_LT(mesh_dim,
                    process_mesh_.ndim(),
                    ::common::errors::InvalidArgument(
                        "Expected mesh dim is less than process mesh dim size, "
                        "but got mesh dim  %d, Process mesh ndim is %d",
                        mesh_dim,
                        process_mesh_.ndim()));
  return split_factor_map_.get_split_factor(mesh_dim);
}

// Record the split factor for `mesh_dim`.
void TensorDistAttr::set_split_factor(int64_t mesh_dim, int64_t split_factor) {
  split_factor_map_.set_split_factor(mesh_dim, split_factor);
}

// Remove any split factor recorded for `mesh_dim`.
void TensorDistAttr::clear_split_factor(int64_t mesh_dim) {
  split_factor_map_.clear_split_factor(mesh_dim);
}

// Return the (sorted) set of mesh dims on which this tensor is partial.
const std::set<int64_t> TensorDistAttr::partial_dims() const {
  std::set<int64_t> mesh_dims;
  std::transform(partial_status_.begin(),
                 partial_status_.end(),
                 std::inserter(mesh_dims, mesh_dims.end()),
                 [](const auto& status) { return status.first; });
  return mesh_dims;
}

// Replace the whole partial-status map (mesh dim -> reduce type) at once.
void TensorDistAttr::set_partial_status(
    const paddle::flat_hash_map<int64_t, ReduceType>& partial_status) {
  partial_status_ = partial_status;
}

// Mark each mesh dim in `dims` as partial with reduce type `type`.
// Throws InvalidArgument if a dim is already partial or is currently used
// for sharding — partial and shard are mutually exclusive per mesh dim.
void TensorDistAttr::set_partial_status(const std::vector<int64_t>& dims,
                                        const ReduceType& type) {
  for (const auto& dim : dims) {
    if (is_partial(dim)) {
      PADDLE_THROW(common::errors::InvalidArgument(
          "Trying to Set dim %d as Partial which is already a Partial dim.",
          dim));
    }
    if (is_shard(dim)) {
      PADDLE_THROW(common::errors::InvalidArgument(
          "Trying to Set dim %d as Partial which is a Sharding dim.", dim));
    }
    partial_status_.emplace(dim, type);
  }
}

// Drop all partial markers, leaving the tensor non-partial on every mesh dim.
void TensorDistAttr::clean_partial_status() { partial_status_.clear(); }

// Remove the partial marker on each given mesh dim; throws if any of the
// dims is not currently partial (dims preceding the offending one stay
// removed, as before).
void TensorDistAttr::clean_partial_dims(const std::vector<int64_t>& dims) {
  for (const auto& mesh_dim : dims) {
    // erase-by-key reports how many entries were removed; zero means the
    // dim was never partial in the first place.
    if (partial_status_.erase(mesh_dim) == 0) {
      PADDLE_THROW(common::errors::InvalidArgument(
          "Trying to clean Partial on dim %d but it is not Partial.",
          mesh_dim));
    }
  }
}

// Initialize the dims mapping so every tensor dim is replicated (an empty
// mesh-dim list per dim). A 0-dim tensor leaves the mapping untouched.
void TensorDistAttr::set_default_dims_mapping(
    const std::vector<int64_t>& tensor_shape) {
  if (!tensor_shape.empty()) {
    dims_mapping_proxy = std::vector<std::vector<int64_t>>(tensor_shape.size());
  }
}

void TensorDistAttr::set_default_dynamic_dims(
    const std::vector<int64_t>& tensor_shape) {
  dynamic_dims_ = std::vector<bool>(tensor_shape.size(), false);
}

void TensorDistAttr::set_default_dynamic_dims(int64_t tensor_shape_size) {
  dynamic_dims_ = std::vector<bool>(tensor_shape_size, false);
}

// Record that `name` was annotated by the user; names outside the
// allow-list fields_ are silently ignored.
void TensorDistAttr::mark_annotated(const std::string& name) {
  const bool known_field =
      std::any_of(fields_.begin(), fields_.end(), [&name](const auto& field) {
        return field == name;
      });
  if (known_field) {
    annotated_[name] = true;
  }
}

// Check that every mesh dim referenced by dims_mapping_ is in range for
// the CURRENT process mesh. A tensor with an empty process mesh always
// passes. NOTE(review): the `process_mesh` parameter is only used for
// logging — validation runs against the member process_mesh_; confirm
// this is intentional.
bool TensorDistAttr::verify_process_mesh(
    const ProcessMesh& process_mesh) const {
  VLOG(4) << "[TensorDistAttr verify_process_mesh] "
          << process_mesh.to_string();
  if (!process_mesh_.empty()) {
    for (const auto& dims_mapping : dims_mapping_) {
      for (int64_t mesh_dim : dims_mapping)
        if (mesh_dim >= process_mesh_.ndim()) {
          return false;
        }
    }
  }
  return true;
}

// Validate a 2-D dims_mapping against a tensor shape:
//   * it must have exactly one entry per tensor dim;
//   * when a process mesh is set, every referenced mesh dim must be in
//     range;
//   * no mesh dim may shard more than one tensor dim.
bool TensorDistAttr::verify_dims_mapping(
    const std::vector<std::vector<int64_t>>& dims_mapping,
    const std::vector<int64_t>& tensor_shape) const {
  VLOG(4) << "[TensorDistAttr verify_dims_mapping] " << str_join(dims_mapping);
  if (dims_mapping.size() != tensor_shape.size()) {
    return false;
  }
  // Counts how many tensor dims each mesh dim shards; > 1 is invalid.
  // The range check is only meaningful when a process mesh is set, so
  // hoist that decision out of the loop instead of duplicating the whole
  // counting logic in two branches (as before).
  std::unordered_map<int64_t, int64_t> use_count;
  const bool check_range = !process_mesh_.empty();
  for (const auto& mesh_dims : dims_mapping) {
    for (int64_t mesh_dim : mesh_dims) {
      if (check_range && mesh_dim >= process_mesh_.ndim()) {
        return false;
      }
      if (++use_count[mesh_dim] > 1) {
        return false;
      }
    }
  }
  return true;
}

// Check that batch dim `dim` (possibly negative, Python-style) indexes a
// valid tensor dim. A 0-dim tensor accepts any value.
bool TensorDistAttr::verify_batch_dim(
    int64_t dim, const std::vector<int64_t>& tensor_shape) const {
  VLOG(4) << "[TensorDistAttr verify_batch_dim] " << dim;
  const int64_t ndim = static_cast<int64_t>(tensor_shape.size());
  if (ndim == 0) {
    return true;
  }
  const int64_t normalized = dim < 0 ? dim + ndim : dim;
  return normalized >= 0 && normalized < ndim;
}

// Check the dynamic-dims flags: an empty vector is always acceptable,
// otherwise it must carry exactly one flag per tensor dim.
bool TensorDistAttr::verify_dynamic_dims(
    const std::vector<bool>& dynamic_dims,
    const std::vector<int64_t>& tensor_shape) const {
  VLOG(4) << "[TensorDistAttr verify_dynamic_dims] " << str_join(dynamic_dims);
  return dynamic_dims.empty() || dynamic_dims.size() == tensor_shape.size();
}

// Check that every annotated key names a known field from fields_.
bool TensorDistAttr::verify_annotated(
    const std::map<std::string, bool>& annotated) const {
  VLOG(4) << "[TensorDistAttr verify_annotated] " << str_join(annotated);
  return std::all_of(
      annotated.begin(), annotated.end(), [](const auto& item) {
        return std::find(std::begin(fields_), std::end(fields_), item.first) !=
               std::end(fields_);
      });
}

// Check every partial entry: the mesh dim must index into the process
// mesh and the reduce type must be one of the known enumerators.
bool TensorDistAttr::verify_partial_status() const {
  VLOG(4) << "[TensorDistAttr verify_partial_status] "
          << partial_status_string();
  for (const auto& [mesh_dim, reduce_type] : partial_status_) {
    if (mesh_dim < 0 || mesh_dim >= process_mesh_.ndim()) {
      return false;
    }
    if (reduce_type < ReduceType::kRedSum ||
        reduce_type > ReduceType::kRedAll) {
      return false;
    }
  }
  return true;
}

// Run the full consistency check against a concrete tensor shape. The
// sub-checks run in the same order as before and short-circuit on the
// first failure.
bool TensorDistAttr::verify(const std::vector<int64_t>& tensor_shape) const {
  return verify_process_mesh(process_mesh_) &&
         verify_dims_mapping(dims_mapping_, tensor_shape) &&
         verify_batch_dim(batch_dim_, tensor_shape) &&
         verify_dynamic_dims(dynamic_dims_, tensor_shape) &&
         verify_annotated(annotated_) && verify_partial_status();
}

// Reduced consistency check for dynamic-shape tensors: skips the batch
// dim, dynamic-dims and annotation checks of verify(). Same order and
// short-circuiting as before.
bool TensorDistAttr::verify_dynamic(
    const std::vector<int64_t>& tensor_shape) const {
  return verify_process_mesh(process_mesh_) &&
         verify_dims_mapping(dims_mapping_, tensor_shape) &&
         verify_partial_status();
}

// Render a human-readable, single-line summary of every field. The exact
// format is relied upon by logs/tests, so keep it stable.
std::string TensorDistAttr::to_string() const {
  std::string dist_str;
  dist_str += "{process_mesh: " + process_mesh_.to_string() + ", ";
  dist_str += "dim_mappings: [" + str_join(dims_mapping_) + "], ";
  dist_str += "batch_dim: " + std::to_string(batch_dim_) + ", ";
  dist_str += "chunk_id: " + std::to_string(chunk_id_) + ", ";
  dist_str += "skip_check_mesh: " + std::to_string(skip_check_mesh_) + ", ";
  dist_str += "dynamic_dims: [" + str_join(dynamic_dims_) + "], ";
  dist_str += "annotated: [" + str_join(annotated_) + "], ";
  dist_str += "partial: " + partial_status_string() + ", ";
  dist_str += "split_factor: " + split_factor_map_.to_string() + ".}";
  return dist_str;
}

// Populate this dist attr from its protobuf message: process mesh, 2-D
// dims mapping, batch dim, chunk id and dynamic dims.
// NOTE(review): dims_mapping_ is written directly rather than through
// dims_mapping_proxy — presumably the proxy's 2-D storage aliases this
// member so the 1-D view is rebuilt lazily on read; confirm in the header.
void TensorDistAttr::from_proto(const TensorDistAttrProto& proto) {
  process_mesh_ = ProcessMesh::from_proto(proto.process_mesh());
  dims_mapping_.resize(proto.dims_mapping_size());

  // Each proto entry holds the list of mesh dims sharding one tensor dim.
  for (int i = 0; i < proto.dims_mapping_size(); ++i) {
    const auto& mesh_dims_proto = proto.dims_mapping(i);
    std::vector<int64_t> mesh_dims_vec;
    for (int j = 0; j < mesh_dims_proto.mesh_dims_size(); ++j) {
      mesh_dims_vec.push_back(mesh_dims_proto.mesh_dims(j));
    }
    dims_mapping_[i] = mesh_dims_vec;
  }

  batch_dim_ = proto.batch_dim();
  chunk_id_ = proto.chunk_id();
  dynamic_dims_.resize(proto.dynamic_dims_size());
  for (int i = 0; i < proto.dynamic_dims_size(); ++i) {
    dynamic_dims_[i] = proto.dynamic_dims(i);
  }
}

// Serialize this dist attr into `proto`: process mesh, the 2-D dims
// mapping, batch dim, chunk id and dynamic dims. (annotated_,
// partial_status_ and split_factor_map_ are not serialized.)
void TensorDistAttr::to_proto(TensorDistAttrProto* proto) const {
  proto->mutable_process_mesh()->CopyFrom(
      phi::distributed::to_proto(process_mesh_));

  for (const auto& mesh_dims : dims_mapping_) {
    // Keep the pointer returned by add_dims_mapping() instead of
    // re-resolving mutable_dims_mapping(i) on every inner iteration.
    auto* mesh_dims_proto = proto->add_dims_mapping();
    for (int64_t mesh_dim : mesh_dims) {
      mesh_dims_proto->add_mesh_dims(mesh_dim);
    }
  }

  proto->set_batch_dim(batch_dim_);
  proto->set_chunk_id(chunk_id_);
  for (const auto& is_dynamic : dynamic_dims_) {
    proto->add_dynamic_dims(is_dynamic);
  }
}

// Serialize this dist attr to a binary protobuf string.
// Throws InvalidArgument if protobuf serialization fails.
std::string TensorDistAttr::serialize_to_string() {
  std::string data;
  auto proto = phi::distributed::to_proto(*this);
  // Serialize exactly once, inside the check. The previous code converted
  // and serialized the message twice: once unconditionally and once more
  // inside the ENFORCE.
  PADDLE_ENFORCE_EQ(proto.SerializeToString(&data),
                    true,
                    errors::InvalidArgument(
                        "Failed to serialize tensor dist attr to string."));
  return data;
}

// Rebuild this dist attr from a binary protobuf string produced by
// serialize_to_string(). Throws InvalidArgument on malformed input.
void TensorDistAttr::parse_from_string(const std::string& data) {
  TensorDistAttrProto proto;
  PADDLE_ENFORCE_EQ(
      proto.ParseFromString(data),
      true,
      errors::InvalidArgument(
          "Failed to parse tensor dist attr from string: %s.", data));
  from_proto(proto);
}

// Two dist attrs are equal when every distribution-relevant field
// matches. As before, annotated_ and skip_check_mesh_ do not participate
// in equality.
bool operator==(const TensorDistAttr& lhs, const TensorDistAttr& rhs) {
  return lhs.process_mesh() == rhs.process_mesh() &&
         lhs.multi_dims_mapping() == rhs.multi_dims_mapping() &&
         lhs.batch_dim() == rhs.batch_dim() &&
         lhs.chunk_id() == rhs.chunk_id() &&
         lhs.dynamic_dims() == rhs.dynamic_dims() &&
         lhs.partial_status() == rhs.partial_status() &&
         lhs.split_factor() == rhs.split_factor();
}

std::string TensorDistAttr::partial_status_string() const {
  std::string partial_status_str = "[";
  for (auto& itr : partial_status_) {
    partial_status_str += "Partial(dims:" + std::to_string(itr.first) + ", " +
                          ReduceTypeStrings[static_cast<int>(itr.second)] +
                          "), ";
  }
  partial_status_str += "]";
  return partial_status_str;
}

bool TensorDistAttr::empty() const {
  // dims_mapping_ may be empty for a 0-dim tensor and still be valid, so
  // only an unset process mesh makes the whole dist attr "empty".
  return process_mesh_.empty();
}

// Convert to a per-mesh-dim placement list: every mesh dim starts as
// Replicated, each mesh dim appearing in dims_mapping_ becomes
// ShardStatus(i, j) (tensor dim i, j-th mesh dim sharding it), and each
// partial mesh dim becomes PartialStatus with its reduce type.
// Throws InvalidArgument if any recorded mesh dim is out of range.
std::vector<std::shared_ptr<PlacementStatus>> TensorDistAttr::to_placement()
    const {
  auto ndim = process_mesh_.ndim();
  // Note: the fill constructor copies one shared_ptr, so all initially
  // replicated slots alias the same ReplicatedStatus instance.
  std::vector<std::shared_ptr<PlacementStatus>> placement(
      ndim, std::make_shared<ReplicatedStatus>());
  for (size_t i = 0; i < dims_mapping_.size(); ++i) {
    const auto& cur_dims = dims_mapping_.at(i);
    for (size_t j = 0; j < cur_dims.size(); ++j) {
      int64_t cur_dim = cur_dims.at(j);
      PADDLE_ENFORCE_LT(
          cur_dim,
          ndim,
          errors::InvalidArgument(
              "Split axis %ld can not exceed the ndim of process_mesh %ld",
              cur_dim,
              ndim));
      placement[cur_dim] = std::make_shared<ShardStatus>(i, j);
    }
  }
  // Partial wins over shard/replicated for its mesh dim.
  for (auto& itr : partial_status_) {
    PADDLE_ENFORCE_LT(
        itr.first,
        ndim,
        errors::InvalidArgument(
            "Partial axis %ld can not exceed the ndim of process_mesh %ld",
            itr.first,
            ndim));
    placement[itr.first] = std::make_shared<PartialStatus>(itr.second);
  }
  return placement;
}

// True when the tensor is replicated on `mesh_axis`, or — for the
// sentinel -1 — on every mesh axis.
bool TensorDistAttr::is_replicated(int64_t mesh_axis) const {
  auto placement = ToPlacements(*this);
  if (mesh_axis == -1) {
    // Take the shared_ptr by const& — passing it by value (as before)
    // costs an atomic refcount increment/decrement per element.
    return std::all_of(placement.begin(),
                       placement.end(),
                       [](const std::shared_ptr<Placement>& p) {
                         return p->is_replicated();
                       });
  } else {
    return placement[mesh_axis]->is_replicated();
  }
}

// True when the tensor is sharded on `mesh_axis` (or, for -1, on any mesh
// axis); `tensor_axis`, when given, restricts the check to that tensor dim.
bool TensorDistAttr::is_shard(int64_t mesh_axis,
                              std::optional<int64_t> tensor_axis) const {
  auto placement = ToPlacements(*this);
  if (mesh_axis == -1) {
    // Take the shared_ptr by const& — passing it by value (as before)
    // costs an atomic refcount increment/decrement per element.
    return std::any_of(placement.begin(),
                       placement.end(),
                       [tensor_axis](const std::shared_ptr<Placement>& p) {
                         return p->is_shard(tensor_axis);
                       });
  } else {
    return placement[mesh_axis]->is_shard(tensor_axis);
  }
}

// True when the tensor is partial on `mesh_axis`; the sentinel -1 asks
// "partial on any mesh axis?".
bool TensorDistAttr::is_partial(int64_t mesh_axis) const {
  return mesh_axis == -1 ? !partial_status_.empty()
                         : partial_status_.count(mesh_axis) > 0;
}

// Toggle the skip-mesh-check flag carried by this dist attr.
void TensorDistAttr::set_skip_check_mesh(bool skip) { skip_check_mesh_ = skip; }

// True when any single tensor dim is sharded across more than one mesh
// dim (i.e. its 2-D mapping entry has multiple mesh dims).
bool TensorDistAttr::is_co_shard() const {
  return std::any_of(dims_mapping_.begin(),
                     dims_mapping_.end(),
                     [](const std::vector<int64_t>& mesh_dims) {
                       return mesh_dims.size() > 1;
                     });
}

// Assign the canonical 2-D mapping directly. The 1-D view is rebuilt
// lazily on the next 1-D read (see the 1-D conversion operator).
TensorDistAttr::DimMapProxy& TensorDistAttr::DimMapProxy::operator=(
    const std::vector<std::vector<int64_t>>& dims_mapping) {
  // vector::operator= already sizes the destination; the explicit
  // resize() the previous code did first was redundant work.
  *dims_mapping_2d = dims_mapping;
  return *this;
}

// Assign from the flattened 1-D form and eagerly rebuild the canonical
// 2-D mapping from it.
TensorDistAttr::DimMapProxy& TensorDistAttr::DimMapProxy::operator=(
    const std::vector<int64_t>& dims_mapping) {
  dims_mapping_1d = dims_mapping;
  sync_2d_map();
  VLOG(4) << "Set 1d dims_mapping, Sync 2d. 1d "
          << auto_parallel::str_join(dims_mapping_1d) << " , 2d  "
          << auto_parallel::str_join(*dims_mapping_2d);
  return *this;
}

// Proxy-to-proxy assignment copies only the canonical 2-D mapping; the
// 1-D cache of *this is refreshed lazily on the next 1-D read.
TensorDistAttr::DimMapProxy& TensorDistAttr::DimMapProxy::operator=(
    const DimMapProxy& other) {
  if (this == &other) {
    return *this;
  }
  *dims_mapping_2d = (*other.dims_mapping_2d);
  return *this;
}

// Proxies compare equal when their canonical 2-D mappings match; the 1-D
// cache does not participate.
bool TensorDistAttr::DimMapProxy::operator==(const DimMapProxy& other) {
  return (*dims_mapping_2d) == (*other.dims_mapping_2d);
}

bool TensorDistAttr::DimMapProxy::operator!=(const DimMapProxy& other) {
  return !this->operator==(other);
}

// Read access to the canonical 2-D mapping (no sync needed — 2-D is the
// source of truth).
TensorDistAttr::DimMapProxy::operator const std::vector<std::vector<int64_t>>&()
    const {
  return *dims_mapping_2d;
}

// Read access to the flattened 1-D mapping; rebuilds the 1-D cache from
// the 2-D mapping first. (sync_1d_map is const yet writes the cache —
// presumably via mutable members; confirm in the header.)
TensorDistAttr::DimMapProxy::operator const std::vector<int64_t>&() const {
  sync_1d_map();
  VLOG(4) << "Get 1d dims_mapping, Sync 1d. 1d is "
          << auto_parallel::str_join(dims_mapping_1d) << ", 2d is "
          << auto_parallel::str_join(*dims_mapping_2d);
  return dims_mapping_1d;
}

// Rebuild the cached 1-D mapping from the canonical 2-D one. A tensor dim
// sharded on several mesh dims cannot be represented in 1-D form, so such
// dims fall back to the mesh dim with the largest extent, with a warning.
void TensorDistAttr::DimMapProxy::sync_1d_map() const {
  dims_mapping_1d.resize(dims_mapping_2d->size());
  for (size_t i = 0; i < dims_mapping_2d->size(); ++i) {
    size_t num_mesh_dim = dims_mapping_2d->at(i).size();
    if (num_mesh_dim <= 1) {
      // Unsharded dims map to -1; singly-sharded dims map directly.
      dims_mapping_1d[i] =
          (*dims_mapping_2d)[i].empty() ? -1 : (*dims_mapping_2d)[i][0];
      continue;
    }

    // Co-sharded dim: pick the mesh dim with the largest size.
    int64_t max_mesh_dim = (*dims_mapping_2d)[i][0];
    int64_t max_mesh_dim_size = process_mesh.shape()[max_mesh_dim];

    for (size_t j = 1; j < num_mesh_dim; ++j) {
      int64_t cur_mesh_dim = (*dims_mapping_2d)[i][j];
      int64_t cur_mesh_dim_size = process_mesh.shape()[cur_mesh_dim];

      if (cur_mesh_dim_size > max_mesh_dim_size) {
        max_mesh_dim = cur_mesh_dim;
        max_mesh_dim_size = cur_mesh_dim_size;
      }
    }

    // Message wording fixed: it previously read "N shared on tensor dim".
    LOG(WARNING) << "There are " << num_mesh_dim << " shards on tensor dim "
                 << i << ". Now fallback to sharding by mesh dim "
                 << max_mesh_dim << ".";

    dims_mapping_1d[i] = max_mesh_dim;
  }
}

void TensorDistAttr::DimMapProxy::sync_2d_map() {
  dims_mapping_2d->resize(dims_mapping_1d.size());
  for (size_t i = 0; i < dims_mapping_1d.size(); ++i) {
    if (dims_mapping_1d.at(i) == -1) {
      (*dims_mapping_2d)[i] = {};
    } else {
      (*dims_mapping_2d)[i] = {dims_mapping_1d[i]};
    }
  }
}

}  // namespace phi::distributed
