Dataset columns: name (string, 18–69 chars); filepath (string, 38 distinct values); source (string, 38 distinct values); test (string, 306–10.4k chars)
AllGatherCombinerTest_CombineAllGathers
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineAllGathers) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[32] parameter(0) param1 = f32[32] parameter(1) allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[128] all-gather(param1), replica_groups={}, dimensions={0} ROOT tuple = (f32[128], f32[128]) tuple(allgather0, allgather1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 2); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather = op::AllGather(op::Parameter(0), op::Parameter(1)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather, 0), op::GetTupleElement(combined_all_gather, 1))); }
AllGatherCombinerTest_CombineAllGathersByAllGatherDimension
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineAllGathersByAllGatherDimension) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,2] parameter(0) param1 = f32[2,2] parameter(1) param2 = f32[2,2] parameter(2) param3 = f32[2,2] parameter(3) param4 = f32[2,2] parameter(4) allgather0 = f32[8,2] all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[8,2] all-gather(param1), replica_groups={}, dimensions={0} allgather2 = f32[2,8] all-gather(param2), replica_groups={}, dimensions={1} allgather3 = f32[2,8] all-gather(param3), replica_groups={}, dimensions={1} allgather4 = f32[8,2] all-gather(param4), replica_groups={}, dimensions={0} ROOT tuple = (f32[8,2], f32[8,2], f32[2,8], f32[2,8], f32[8,2]) tuple(allgather0, allgather1, allgather2, allgather3, allgather4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 5); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather0 = op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4)); Matcher<const HloInstruction*> combined_all_gather1 = op::AllGather(op::Parameter(2), op::Parameter(3)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather0, 0), op::GetTupleElement(combined_all_gather0, 1), op::GetTupleElement(combined_all_gather1, 0), op::GetTupleElement(combined_all_gather1, 1), op::GetTupleElement(combined_all_gather0, 2))); }
AllGatherCombinerTest_CombineAllGathersByDim
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineAllGathersByDim) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,7]{1,0} parameter(0) param1 = f32[3,8]{1,0} parameter(1) param2 = f32[4,9]{0,1} parameter(2) param3 = f32[5,10]{0,1} parameter(3) param4 = f32[6,11]{1,0} parameter(4) allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={}, dimensions={0} allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={}, dimensions={1} allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={}, dimensions={1} allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={}, dimensions={0} ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1}, f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3, allgather4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 5); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather_0 = op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4)); Matcher<const HloInstruction*> combined_all_gather_1 = op::AllGather(op::Parameter(2), op::Parameter(3)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather_0, 0), op::GetTupleElement(combined_all_gather_0, 1), op::GetTupleElement(combined_all_gather_1, 0), op::GetTupleElement(combined_all_gather_1, 1), op::GetTupleElement(combined_all_gather_0, 2))); std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module); ASSERT_EQ(2, all_gathers.size()); ASSERT_EQ(0, all_gathers[0]->all_gather_dimension()); ASSERT_EQ(1, all_gathers[1]->all_gather_dimension()); }
AllGatherCombinerTest_CombineAllGathersDifferentDims
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineAllGathersDifferentDims) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,3]{1,0} parameter(0) param1 = f32[2,3]{0,1} parameter(1) allgather0 = f32[8,3]{1,0} all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[2,12]{0,1} all-gather(param1), replica_groups={}, dimensions={1} ROOT tuple = (f32[8,3]{1,0}, f32[2,12]{0,1}) tuple(allgather0, allgather1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/false); ASSERT_EQ(AllGatherCount(*module), 2); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather = op::AllGather(op::Parameter(0), op::Bitcast(op::Parameter(1))); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather, 0), op::Bitcast(op::GetTupleElement(combined_all_gather, 1)))); }
AllGatherCombinerTest_CombineFromTwoDomainsWithSameMetadata
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h"
AllGatherCombinerTest_CombineManyAllGathersDifferentDims
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDims) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,7]{1,0} parameter(0) param1 = f32[3,8]{1,0} parameter(1) param2 = f32[4,9]{0,1} parameter(2) param3 = f32[5,10]{0,1} parameter(3) param4 = f32[6,11]{1,0} parameter(4) allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={}, dimensions={0} allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={}, dimensions={1} allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={}, dimensions={1} allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={}, dimensions={0} ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1}, f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3, allgather4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/false); ASSERT_EQ(AllGatherCount(*module), 5); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather = op::AllGather( op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)), op::Parameter(4)); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather, 0), op::GetTupleElement(combined_all_gather, 1), op::Bitcast(op::GetTupleElement(combined_all_gather, 2)), op::Bitcast(op::GetTupleElement(combined_all_gather, 3)), op::GetTupleElement(combined_all_gather, 4))); std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module); ASSERT_EQ(1, all_gathers.size()); ASSERT_EQ(0, all_gathers.front()->all_gather_dimension()); }
AllGatherCombinerTest_CombineManyAllGathersDifferentDimsMixedRanks
xla/service/all_gather_combiner_test.cc
int64_t FindMostFrequentGatherDim( absl::Span<HloInstruction* const> to_combine) { assert(!to_combine.empty()); // Count frequencies. int64_t min_rank = std::numeric_limits<int64_t>::max(); std::vector<int64_t> frequency; for (const HloInstruction* it : to_combine) { int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension(); frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())), 0); frequency[dim]++; min_rank = std::min(min_rank, it->shape().rank()); } int64_t most_frequent_dim = std::distance( frequency.begin(), std::max_element(frequency.begin(), frequency.end())); return most_frequent_dim < min_rank ? most_frequent_dim : 0; } absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine, bool combine_by_dim) { if (to_combine.size() < 2) { return absl::OkStatus(); } VLOG(1) << "Combined " << to_combine.size() << " AllGather ops"; HloComputation& computation = *to_combine.back()->parent(); // Create a single bigger AllGather of the operands of the smaller AllGather. std::vector<HloInstruction*> operands; std::vector<std::optional<std::vector<int64_t>>> operand_permutations; std::vector<Shape> output_shapes; // Find the most frequent all-gather dimension. int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine); VLOG(1) << "Combining set"; for (HloInstruction* hlo : to_combine) { VLOG(1) << "Set element: " << hlo->ToString(); TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather); const auto* ag = Cast<HloAllGatherInstruction>(hlo); TF_RET_CHECK(hlo->operand_count() == 1); TF_RET_CHECK(hlo->shape().IsArray()); TF_RET_CHECK(!combine_by_dim || ag->all_gather_dimension() == most_frequent_dim); HloInstruction* operand = hlo->operands().front(); operands.push_back(operand); operand_permutations.emplace_back(); output_shapes.push_back(hlo->shape()); // Bitcast operand if needed. if (ag->all_gather_dimension() != most_frequent_dim) { const Shape& operand_shape = operand->shape(); // Build permutation to align gather dimension. auto& perm = operand_permutations.back(); perm = std::vector<int64_t>(operand_shape.rank()); std::iota(perm->begin(), perm->end(), 0); std::swap((*perm)[most_frequent_dim], (*perm)[ag->all_gather_dimension()]); // Bitcast operand and update output shape. operands.back() = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*perm, operand_shape), operand)); output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape()); } } // Create combined all-gather op with a tuple result. HloInstruction* combined; combined = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim, to_combine.front()->device_list(), /*constrain_layout=*/false, to_combine.front()->channel_id(), Cast<HloAllGatherInstruction>(to_combine.front()) ->use_global_device_ids())); // We have to propagate the sharding manually because Domain instructions are // not guaranteed to preserve it for side effecting instructions. combined->set_sharding( hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine)); VLOG(1) << "Replacing with : " << combined->ToString(); // Replace all the smaller all-gather ops with (bitcast) elements of the tuple // result. 
for (int64_t i = 0; i < to_combine.size(); ++i) { HloInstruction* replacement = computation.AddInstruction( HloInstruction::CreateGetTupleElement(combined, i)); if (operand_permutations[i]) { replacement = computation.AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::PermuteDimensions(*operand_permutations[i], replacement->shape()), replacement)); } TF_RETURN_IF_ERROR( computation.ReplaceInstruction(to_combine[i], replacement)); } return absl::OkStatus(); } std::optional<GroupKey> CombineKey(const HloInstruction* instruction, const HloDomainMap& domain_map, bool combine_by_dim) { if (instruction->opcode() != HloOpcode::kAllGather) { return std::nullopt; } std::vector<std::vector<int64_t>> replica_groups; const auto* ag = Cast<HloAllGatherInstruction>(instruction); replica_groups.reserve(ag->replica_groups().size()); for (const ReplicaGroup& replica_group : ag->replica_groups()) { replica_groups.push_back( std::vector<int64_t>(replica_group.replica_ids().begin(), replica_group.replica_ids().end())); } // Ignore dimension (set to -1) if we are not grouping by dimension. int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1; return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag), ag->channel_id().has_value(), ag->use_global_device_ids(), replica_groups}; } AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes, int64_t combine_threshold_count, bool combine_by_dim) : combine_threshold_in_bytes_(combine_threshold_in_bytes), combine_threshold_count_(combine_threshold_count), combine_by_dim_(combine_by_dim) {} absl::StatusOr<bool> AllGatherCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllGatherCombiner with threshold of " << combine_threshold_in_bytes_ << " bytes"; if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) { VLOG(1) << "Skip AllGatherCombiner because the threshold is zero"; return false; } if (hlo_query::ContainsLayoutConstrainedCollective(*module, HloOpcode::kAllGather)) { VLOG(1) << "Skip AllGatherCombiner because the module contains " "all-gather with constrained layouts"; return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, "")); auto key_fn = [&](const HloInstruction* instruction) { return CombineKey(instruction, *domain_map, combine_by_dim_); }; auto combine_fn = [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status { return CombineAllGathers(to_combine, combine_by_dim_); }; TF_ASSIGN_OR_RETURN( bool computation_changed, CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn, combine_threshold_in_bytes_, combine_threshold_count_)); changed |= computation_changed; } return changed; }
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsMixedRanks) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,7]{1,0} parameter(0) param1 = f32[3,8]{1,0} parameter(1) param2 = f32[4,9]{0,1} parameter(2) param3 = f32[5,10]{0,1} parameter(3) param4 = f32[6]{0} parameter(4) allgather0 = f32[2,28]{1,0} all-gather(param0), replica_groups={}, dimensions={1} allgather1 = f32[3,32]{1,0} all-gather(param1), replica_groups={}, dimensions={1} allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={}, dimensions={1} allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={}, dimensions={1} allgather4 = f32[24]{0} all-gather(param4), replica_groups={}, dimensions={0} ROOT tuple = (f32[2,28]{1,0}, f32[3,32]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1}, f32[24]{0}) tuple(allgather0, allgather1, allgather2, allgather3, allgather4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/false); ASSERT_EQ(AllGatherCount(*module), 5); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather = op::AllGather( op::Bitcast(op::Parameter(0)), op::Bitcast(op::Parameter(1)), op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)), op::Parameter(4)); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::Bitcast(op::GetTupleElement(combined_all_gather, 0)), op::Bitcast(op::GetTupleElement(combined_all_gather, 1)), op::Bitcast(op::GetTupleElement(combined_all_gather, 2)), op::Bitcast(op::GetTupleElement(combined_all_gather, 3)), op::GetTupleElement(combined_all_gather, 4))); std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module); ASSERT_EQ(1, all_gathers.size()); // when using different ranks and the most frequent AG dim (1) is not valid // for rank 1 shape, we use default dim 0. ASSERT_EQ(0, all_gathers.front()->all_gather_dimension()); }
AllGatherCombinerTest_CombineManyAllGathersDifferentDimsRank4
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsRank4) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[2,7,2,7]{3,2,1,0} parameter(0) param1 = f32[3,8,3,8]{3,2,1,0} parameter(1) param2 = f32[4,9,4,9]{3,0,1,2} parameter(2) param3 = f32[5,10,5,10]{3,0,1,2} parameter(3) param4 = f32[6,11,6,11]{3,2,1,0} parameter(4) allgather0 = f32[8,7,2,7]{3,2,1,0} all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[12,8,3,8]{3,2,1,0} all-gather(param1), replica_groups={}, dimensions={0} allgather2 = f32[4,9,16,9]{3,0,1,2} all-gather(param2), replica_groups={}, dimensions={2} allgather3 = f32[5,10,20,10]{3,0,1,2} all-gather(param3), replica_groups={}, dimensions={2} allgather4 = f32[24,11,6,11]{3,2,1,0} all-gather(param4), replica_groups={}, dimensions={0} ROOT tuple = (f32[8,7,2,7]{3,2,1,0}, f32[12,8,3,8]{3,2,1,0}, f32[4,9,16,9]{3,0,1,2}, f32[5,10,20,10]{3,0,1,2}, f32[24,11,6,11]{3,2,1,0}) tuple(allgather0, allgather1, allgather2, allgather3, allgather4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/false); ASSERT_EQ(AllGatherCount(*module), 5); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_TRUE(changed); Matcher<const HloInstruction*> combined_all_gather = op::AllGather( op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)), op::Parameter(4)); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(combined_all_gather, 0), op::GetTupleElement(combined_all_gather, 1), op::Bitcast(op::GetTupleElement(combined_all_gather, 2)), op::Bitcast(op::GetTupleElement(combined_all_gather, 3)), op::GetTupleElement(combined_all_gather, 4))); std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module); ASSERT_EQ(1, all_gathers.size()); ASSERT_EQ(0, all_gathers.front()->all_gather_dimension()); }
AllGatherCombinerTest_CombineUpToThreshold
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, CombineUpToThreshold) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[8] parameter(0) param1 = f32[8] parameter(1) allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0} ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); // Run the AllGather combiner optimization pass with a threshold just higher // than that required such that the combination can occur. AllGatherCombiner combine(256, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 2); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_EQ(AllGatherCount(*module), 1); EXPECT_TRUE(changed); }
AllGatherCombinerTest_DomainPreventsCombining
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h"
AllGatherCombinerTest_DoNotCombineOverThreshold
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, DoNotCombineOverThreshold) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param0 = f32[8] parameter(0) param1 = f32[8] parameter(1) allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0} allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0} ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); // Run the AllGather combiner optimization pass with threshold less than // the combined size of the all gather ops so that the combination // cannot occur. AllGatherCombiner combine(255, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 2); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_EQ(AllGatherCount(*module), 2); EXPECT_FALSE(changed); }
AllGatherCombinerTest_NoDependentCombination
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" TEST_F(AllGatherCombinerTest, NoDependentCombination) { const char* const hlo_string = R"( HloModule Module ENTRY entry { param = f32[1] parameter(0) allgather0 = f32[2] all-gather(param), replica_groups={}, dimensions={0} ROOT allgather1 = f32[4] all-gather(allgather0), replica_groups={}, dimensions={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); AllGatherCombiner combine(1024 * 1024, kMaxCombineCount, /*combine_by_dim=*/true); ASSERT_EQ(AllGatherCount(*module), 2); TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get())); EXPECT_EQ(AllGatherCount(*module), 2); EXPECT_FALSE(changed); }
AllGatherCombinerTest_NoDifferentReplicaGroupsCombination
xla/service/all_gather_combiner_test.cc
#include "xla/service/all_gather_combiner.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h"