/* Copyright 2023 The OpenXLA Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "xla/backends/gpu/runtime/p2p_thunk_common.h"

#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "xla/backends/gpu/runtime/thunk.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/collective_op_group_mode.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/parser/hlo_parser.h"
#include "xla/runtime/device_id.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/source_target_pairs.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/platform/statusor.h"
#include "xla/xla_data.pb.h"

namespace xla {
namespace gpu {

// Creates (once) the per-(executor, run) execution counter, starting at 0.
// Idempotent: calling again for an existing key leaves the counter untouched.
absl::Status ExecutionCounters::Initialize(se::StreamExecutor* executor,
                                           RunId run_id) {
  absl::MutexLock lock(mu_);
  // try_emplace is a no-op when the key is already present, replacing the
  // original contains()-then-emplace double lookup with a single one.
  counters_.try_emplace(CounterKey{executor, run_id}, 0);
  return absl::OkStatus();
}

// Returns a pointer to the counter for (executor, run_id), or an internal
// error if Initialize() was never called for that pair. The pointer remains
// valid for as long as the entry stays in the map.
absl::StatusOr<int64_t*> ExecutionCounters::GetCounter(
    se::StreamExecutor* executor, RunId run_id) {
  absl::MutexLock lock(mu_);
  if (auto it = counters_.find(CounterKey{executor, run_id});
      it != counters_.end()) {
    return &it->second;
  }
  return absl::InternalError("Execution counter not initialized");
}

// Parses the source-target pairs out of the kSendRecvSourceTargetPairsAttr
// string attribute, e.g. "{{0,1},{1,2}}" -> [(0,1), (1,2)]. Fails with
// kAborted when the attribute is missing and with kInternal when any parsed
// group does not have exactly two ids.
absl::StatusOr<std::vector<std::pair<int64_t, int64_t>>> GetSourceTargetPairs(
    mlir::DictionaryAttr frontend_attributes) {
  auto pairs_attr = frontend_attributes.getAs<mlir::StringAttr>(
      kSendRecvSourceTargetPairsAttr);
  if (!pairs_attr) {
    return absl::AbortedError(
        absl::StrCat("expecting send/recv op with string attribute ",
                     kSendRecvSourceTargetPairsAttr));
  }
  TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> groups,
                      ParseReplicaGroupsOnly(pairs_attr.str()));
  std::vector<std::pair<int64_t, int64_t>> pairs;
  pairs.reserve(groups.size());
  for (const ReplicaGroup& group : groups) {
    // Each group encodes exactly one (source, target) pair.
    TF_RET_CHECK(group.replica_ids_size() == 2);
    pairs.emplace_back(group.replica_ids(0), group.replica_ids(1));
  }
  return pairs;
}

// Builds the P2PConfig for a Send/Recv instruction.
//
// Problems with the frontend attribute strings (missing attribute, malformed
// source-target pairs, mismatched validation bounds) are deliberately
// swallowed and yield a partially-filled config, to avoid an absl::StatusOr
// return type. This should be ok as Send/Recv are generated by the compiler.
P2PConfig GetP2PConfigForSendRecv(const HloSendRecvInstruction* instr,
                                  const Shape& shape, int64_t replica_count,
                                  int64_t partition_count) {
  P2PConfig p2p_config;
  auto& config = p2p_config.config;

  config.operand_element_type.push_back(shape.element_type());
  // A positive channel id marks a cross-partition operation.
  config.group_mode = GetCollectiveOpGroupMode(
                          instr->channel_id().value_or(0) > 0, std::nullopt)
                          .value();

  // All execution instances of a Send/Recv together form a replica group.
  const int64_t num_participants =
      config.group_mode ==
              CollectiveOpGroupMode::COLLECTIVE_OP_GROUP_MODE_CROSS_REPLICA
          ? replica_count
          : partition_count;
  config.replica_groups.emplace_back();
  ReplicaGroup& replica_group = config.replica_groups.front();
  for (int i = 0; i < num_participants; ++i) {
    replica_group.add_replica_ids(i);
  }

  const auto& attributes = instr->frontend_attributes().map();
  // Look the attribute up with find() rather than at(): protobuf Map::at()
  // dies when the key is missing, which would defeat the graceful fallback
  // below (the previous std::optional wrapper around at() never observed a
  // missing key).
  auto pairs_it = attributes.find(kSendRecvSourceTargetPairsAttr);
  if (pairs_it == attributes.end()) {
    return p2p_config;
  }
  auto statusor = ParseReplicaGroupsOnly(pairs_it->second);
  if (!statusor.ok()) {
    return p2p_config;
  }

  std::vector<ReplicaGroup> replica_groups = std::move(statusor).value();
  auto validation_it = attributes.find(kSendRecvValidationAttr);
  P2PConfig::ValidationKind validation_kind = P2PConfig::ValidationKind::kValid;
  std::vector<ReplicaGroup> bounds;
  if (validation_it != attributes.end()) {
    if (validation_it->second == "invalid") {
      validation_kind = P2PConfig::ValidationKind::kInvalid;
    } else {
      auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
      if (!statusor_bounds.ok() ||
          statusor_bounds.value().size() != replica_groups.size()) {
        // Ignore problems related to the validation string to avoid using
        // absl::StatusOr for the return type.
        return p2p_config;
      }
      validation_kind = P2PConfig::ValidationKind::kConditional;
      bounds = std::move(statusor_bounds).value();
    }
  }

  p2p_config.validation_kind = validation_kind;
  P2PConfig::SourceTargetToBounds& source_target_to_bounds =
      p2p_config.source_target_to_bounds;
  int i = 0;
  for (const ReplicaGroup& replica_group : replica_groups) {
    // Each group must encode exactly one (source, target) pair; bail out on
    // malformed input instead of reading out-of-range proto fields.
    if (replica_group.replica_ids_size() != 2) {
      return p2p_config;
    }
    int64_t source = replica_group.replica_ids(0);
    int64_t target = replica_group.replica_ids(1);

    // Record, per device id, who it receives from and who it sends to.
    p2p_config.id_to_source_target.insert({target, {}}).first->second.source =
        source;
    p2p_config.id_to_source_target.insert({source, {}}).first->second.target =
        target;

    if (validation_kind == P2PConfig::ValidationKind::kConditional) {
      const ReplicaGroup& bound = bounds[i];
      if (bound.replica_ids_size() != 2) {
        return p2p_config;
      }
      int64_t lower = bound.replica_ids(0);
      int64_t upper = bound.replica_ids(1);
      source_target_to_bounds[std::make_pair(source, target)] =
          std::make_pair(lower, upper);
      i++;
    }
  }

  return p2p_config;
}

// Picks the async stream kind for a P2P (Send/Recv) instruction from its
// frontend attributes; defaults to P2P0 when no attribute selects otherwise.
AsyncStreamKind GetStreamKindForP2P(const HloInstruction* instr) {
  const auto& attributes = instr->frontend_attributes().map();

  // kCollectiveStreamAttrName takes precedence over kSendRecvPipelineAttr.
  if (auto stream_it = attributes.find(kCollectiveStreamAttrName);
      stream_it != attributes.end() &&
      stream_it->second == kCollectiveStreamP2P) {
    // Use any of the two p2p streams.
    return AsyncStreamKind::ASYNC_STREAM_KIND_P2P0;
  }

  if (auto pipeline_it = attributes.find(kSendRecvPipelineAttr);
      pipeline_it != attributes.end() && pipeline_it->second == "1") {
    return AsyncStreamKind::ASYNC_STREAM_KIND_P2P1;
  }
  return AsyncStreamKind::ASYNC_STREAM_KIND_P2P0;
}

// Retrieves the current collective ID for the executing device: the replica
// id in cross-replica mode, otherwise the partition (computation) id.
absl::StatusOr<const int64_t> GetCollectiveCurrentId(
    CollectiveParams* collective_params, const P2PConfig& config) {
  TF_ASSIGN_OR_RETURN(const DeviceAssignment::LogicalID logical_id,
                      collective_params->device_assn->LogicalIdForDevice(
                          collective_params->global_device_id));
  if (config.config.group_mode ==
      CollectiveOpGroupMode::COLLECTIVE_OP_GROUP_MODE_CROSS_REPLICA) {
    return logical_id.replica_id;
  }
  return logical_id.computation_id;
}

}  // namespace gpu
}  // namespace xla
