Original Code File
stringlengths
196
31.9k
Original Ground Truth
stringlengths
78
32k
Code
stringlengths
15
27.3k
Unit Test
stringlengths
0
30.4k
#include "tsl/platform/coding.h"

#include "tsl/platform/byte_order.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/tstring.h"
#include "tsl/platform/types.h"

namespace tsl {
namespace core {

// Serializes `value` into buf[0..1] in little-endian byte order.
void EncodeFixed16(char* buf, uint16 value) {
  if (port::kLittleEndian) {
    // Host order already matches the wire order; copy the raw bytes.
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
  }
}

// Serializes `value` into buf[0..3] in little-endian byte order.
void EncodeFixed32(char* buf, uint32 value) {
  if (port::kLittleEndian) {
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
    buf[2] = (value >> 16) & 0xff;
    buf[3] = (value >> 24) & 0xff;
  }
}

// Serializes `value` into buf[0..7] in little-endian byte order.
void EncodeFixed64(char* buf, uint64 value) {
  if (port::kLittleEndian) {
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
    buf[2] = (value >> 16) & 0xff;
    buf[3] = (value >> 24) & 0xff;
    buf[4] = (value >> 32) & 0xff;
    buf[5] = (value >> 40) & 0xff;
    buf[6] = (value >> 48) & 0xff;
    buf[7] = (value >> 56) & 0xff;
  }
}

// Appends the 2-byte little-endian encoding of `value` to `*dst`.
void PutFixed16(string* dst, uint16 value) {
  char scratch[sizeof(value)];
  EncodeFixed16(scratch, value);
  dst->append(scratch, sizeof(scratch));
}

// Appends the 4-byte little-endian encoding of `value` to `*dst`.
void PutFixed32(string* dst, uint32 value) {
  char scratch[sizeof(value)];
  EncodeFixed32(scratch, value);
  dst->append(scratch, sizeof(scratch));
}

// Appends the 8-byte little-endian encoding of `value` to `*dst`.
void PutFixed64(string* dst, uint64 value) {
  char scratch[sizeof(value)];
  EncodeFixed64(scratch, value);
  dst->append(scratch, sizeof(scratch));
}

// Writes `v` to `dst` as a base-128 varint (1-5 bytes, low 7 bits first,
// high bit of each byte marks continuation).  Returns one past the last
// byte written.
char* EncodeVarint32(char* dst, uint32 v) {
  unsigned char* p = reinterpret_cast<unsigned char*>(dst);
  static const int B = 128;  // continuation bit
  if (v < (1 << 7)) {
    *(p++) = v;
  } else if (v < (1 << 14)) {
    *(p++) = v | B;
    *(p++) = v >> 7;
  } else if (v < (1 << 21)) {
    *(p++) = v | B;
    *(p++) = (v >> 7) | B;
    *(p++) = v >> 14;
  } else if (v < (1 << 28)) {
    *(p++) = v | B;
    *(p++) = (v >> 7) | B;
    *(p++) = (v >> 14) | B;
    *(p++) = v >> 21;
  } else {
    *(p++) = v | B;
    *(p++) = (v >> 7) | B;
    *(p++) = (v >> 14) | B;
    *(p++) = (v >> 21) | B;
    *(p++) = v >> 28;
  }
  return reinterpret_cast<char*>(p);
}

// Appends the varint encoding of `v` (at most 5 bytes) to `*dst`.
void PutVarint32(string* dst, uint32 v) {
  char scratch[5];
  char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}

// tstring overload of PutVarint32; identical encoding.
void PutVarint32(tstring* dst, uint32 v) {
  char scratch[5];
  char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}

// Writes `v` to `dst` as a base-128 varint (1-10 bytes).  Returns one past
// the last byte written.
char* EncodeVarint64(char* dst, uint64 v) {
  static const int B = 128;  // continuation bit
  unsigned char* p = reinterpret_cast<unsigned char*>(dst);
  while (v >= B) {
    *(p++) = (v & (B - 1)) | B;
    v >>= 7;
  }
  *(p++) = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(p);
}

// Appends the varint encoding of `v` (at most 10 bytes) to `*dst`.
void PutVarint64(string* dst, uint64 v) {
  char scratch[10];
  char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}

// tstring overload of PutVarint64; identical encoding.
void PutVarint64(tstring* dst, uint64 v) {
  char scratch[10];
  char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}

// Returns the number of bytes the varint encoding of `v` occupies (1-10).
int VarintLength(uint64_t v) {
  int len = 1;
  while (v >= 128) {
    v >>= 7;
    len++;
  }
  return len;
}

// Decodes a varint32 from [p, limit).  Fast path: a single byte with the
// continuation bit clear; otherwise defers to the multi-byte fallback.
// Returns one past the last consumed byte, or nullptr on truncation/overflow.
const char* GetVarint32Ptr(const char* p, const char* limit, uint32* value) {
  if (p < limit) {
    uint32 result = *(reinterpret_cast<const unsigned char*>(p));
    if ((result & 128) == 0) {
      *value = result;
      return p + 1;
    }
  }
  return GetVarint32PtrFallback(p, limit, value);
}

// Multi-byte varint32 decode; at most 5 bytes (shift <= 28).
const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                   uint32* value) {
  uint32 result = 0;
  for (uint32 shift = 0; shift <= 28 && p < limit; shift += 7) {
    uint32 byte = *(reinterpret_cast<const unsigned char*>(p));
    p++;
    if (byte & 128) {
      // Continuation bit set: accumulate the low 7 bits and keep going.
      result |= ((byte & 127) << shift);
    } else {
      result |= (byte << shift);
      *value = result;
      return reinterpret_cast<const char*>(p);
    }
  }
  return nullptr;  // truncated input or more than 5 bytes
}

// Decodes a varint32 from the front of `*input`, advancing it past the
// consumed bytes.  Returns false on malformed/truncated input.
bool GetVarint32(StringPiece* input, uint32* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint32Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    *input = StringPiece(q, limit - q);
    return true;
  }
}

// Decodes a varint64 from [p, limit); at most 10 bytes (shift <= 63).
// Returns one past the last consumed byte, or nullptr on failure.
const char* GetVarint64Ptr(const char* p, const char* limit, uint64* value) {
  uint64 result = 0;
  for (uint32 shift = 0; shift <= 63 && p < limit; shift += 7) {
    uint64 byte = *(reinterpret_cast<const unsigned char*>(p));
    p++;
    if (byte & 128) {
      result |= ((byte & 127) << shift);
    } else {
      result |= (byte << shift);
      *value = result;
      return reinterpret_cast<const char*>(p);
    }
  }
  return nullptr;
}

// Decodes a varint64 from the front of `*input`, advancing it past the
// consumed bytes.  Returns false on malformed/truncated input.
bool GetVarint64(StringPiece* input, uint64* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint64Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    *input = StringPiece(q, limit - q);
    return true;
  }
}

}  // namespace core
}  // namespace tsl
#include "tensorflow/core/lib/core/coding.h"

#include <vector>

#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace core {

// Round-trips every uint16 in [0, 50000) through EncodeFixed16/DecodeFixed16.
TEST(Coding, Fixed16) {
  static const uint16 N = 50000;
  string s;
  for (uint16 v = 0; v < N; v++) {
    char buf[sizeof(uint16)];
    EncodeFixed16(buf, v);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (uint16 v = 0; v < N; v++) {
    uint16 actual = DecodeFixed16(p);
    ASSERT_EQ(v, actual);
    p += sizeof(uint16);
  }
}

// Round-trips every uint32 in [0, 100000) through EncodeFixed32/DecodeFixed32.
TEST(Coding, Fixed32) {
  static const uint32 N = 100000;
  string s;
  for (uint32 v = 0; v < N; v++) {
    char buf[sizeof(uint32)];
    EncodeFixed32(buf, v);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (uint32 v = 0; v < N; v++) {
    uint32 actual = DecodeFixed32(p);
    ASSERT_EQ(v, actual);
    p += sizeof(uint32);
  }
}

// Round-trips 2^k - 1, 2^k, 2^k + 1 for k in [0, 63] through Fixed64.
TEST(Coding, Fixed64) {
  string s;
  for (int power = 0; power <= 63; power++) {
    uint64 v = static_cast<uint64>(1) << power;
    char buf[sizeof(uint64)];
    EncodeFixed64(buf, v - 1);
    s.append(buf, sizeof(buf));
    EncodeFixed64(buf, v + 0);
    s.append(buf, sizeof(buf));
    EncodeFixed64(buf, v + 1);
    s.append(buf, sizeof(buf));
  }
  const char* p = s.data();
  for (int power = 0; power <= 63; power++) {
    uint64 v = static_cast<uint64>(1) << power;
    uint64 actual;
    actual = DecodeFixed64(p);
    ASSERT_EQ(v - 1, actual);
    p += sizeof(uint64);
    actual = DecodeFixed64(p);
    ASSERT_EQ(v + 0, actual);
    p += sizeof(uint64);
    actual = DecodeFixed64(p);
    ASSERT_EQ(v + 1, actual);
    p += sizeof(uint64);
  }
}

// Verifies the exact little-endian byte layout produced by the encoders.
TEST(Coding, EncodingOutput) {
  char dst[8];
  EncodeFixed16(dst, 0x0201);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  EncodeFixed32(dst, 0x04030201);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
  EncodeFixed64(dst, 0x0807060504030201ull);
  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
}

// Round-trips 32*32 values (each power position, each small multiplier)
// through PutVarint32/GetVarint32Ptr.
TEST(Coding, Varint32) {
  string s;
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 v = (i / 32) << (i % 32);
    PutVarint32(&s, v);
  }
  const char* p = s.data();
  const char* limit = p + s.size();
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 expected = (i / 32) << (i % 32);
    uint32 actual;
    p = GetVarint32Ptr(p, limit, &actual);
    ASSERT_TRUE(p != nullptr);
    ASSERT_EQ(expected, actual);
  }
  ASSERT_EQ(p, s.data() + s.size());
}

// Round-trips boundary values (0, 100, ~0, and 2^k +/- 1) through
// PutVarint64/GetVarint64Ptr.
TEST(Coding, Varint64) {
  std::vector<uint64> values;
  values.push_back(0);
  values.push_back(100);
  values.push_back(~static_cast<uint64>(0));
  values.push_back(~static_cast<uint64>(0) - 1);
  for (uint32 k = 0; k < 64; k++) {
    const uint64 power = 1ull << k;
    values.push_back(power);
    values.push_back(power - 1);
    values.push_back(power + 1);
  }
  string s;
  for (size_t i = 0; i < values.size(); i++) {
    PutVarint64(&s, values[i]);
  }
  const char* p = s.data();
  const char* limit = p + s.size();
  for (size_t i = 0; i < values.size(); i++) {
    ASSERT_TRUE(p < limit);
    uint64 actual;
    p = GetVarint64Ptr(p, limit, &actual);
    ASSERT_TRUE(p != nullptr);
    ASSERT_EQ(values[i], actual);
  }
  ASSERT_EQ(p, limit);
}

// A 6-byte varint32 (all continuation bits set for 5 bytes) must be rejected.
TEST(Coding, Varint32Overflow) {
  uint32 result;
  string input("\x81\x82\x83\x84\x85\x11");
  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}

// Every proper prefix of a maximal varint32 must decode as truncated.
TEST(Coding, Varint32Truncation) {
  uint32 large_value = (1u << 31) + 100;
  string s;
  PutVarint32(&s, large_value);
  uint32 result;
  for (size_t len = 0; len < s.size() - 1; len++) {
    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
              nullptr);
  ASSERT_EQ(large_value, result);
}

// An 11-byte varint64 must be rejected.
TEST(Coding, Varint64Overflow) {
  uint64 result;
  string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}

// Every proper prefix of a maximal varint64 must decode as truncated.
TEST(Coding, Varint64Truncation) {
  uint64 large_value = (1ull << 63) + 100ull;
  string s;
  PutVarint64(&s, large_value);
  uint64 result;
  for (size_t len = 0; len < s.size() - 1; len++) {
    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
              nullptr);
  ASSERT_EQ(large_value, result);
}

}  // namespace core
}  // namespace tensorflow
// Appends the varint encoding of `v` to `*dst`.  A varint32 occupies at
// most 5 bytes, so a small stack buffer suffices; only the bytes actually
// produced by EncodeVarint32 are appended.
void PutVarint32(string* dst, uint32 v) {
  char scratch[5];
  char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}
// Encodes 32*32 values covering every bit position with small multipliers,
// then decodes them back in order and checks the stream is fully consumed.
TEST(Coding, Varint32) {
  string s;
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 v = (i / 32) << (i % 32);
    PutVarint32(&s, v);
  }
  const char* p = s.data();
  const char* limit = p + s.size();
  for (uint32 i = 0; i < (32 * 32); i++) {
    uint32 expected = (i / 32) << (i % 32);
    uint32 actual;
    p = GetVarint32Ptr(p, limit, &actual);
    ASSERT_TRUE(p != nullptr);
    ASSERT_EQ(expected, actual);
  }
  ASSERT_EQ(p, s.data() + s.size());
}
#include "tensorstore/index_space/internal/translate_op.h"

#include <algorithm>

#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"

namespace tensorstore {
namespace internal_index_space {
namespace {

// Adjusts every output index map of `transform` to compensate for the input
// domain having been shifted by `input_offsets[d]` along each input
// dimension `d`, so the transform maps the same logical points as before.
absl::Status TranslateOutputOffsetsUsingInputOffsets(
    TransformRep* transform, const Index* input_offsets) {
  const DimensionIndex output_rank = transform->output_rank;
  const DimensionIndex input_rank = transform->input_rank;
  span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = maps[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::single_input_dimension: {
        // output = offset + stride * input: subtract stride * shift from the
        // offset, reporting overflow rather than wrapping.
        const DimensionIndex input_dim = map.input_dimension();
        const Index offset_change = input_offsets[input_dim];
        Index new_offset;
        if (internal::MulOverflow(offset_change, map.stride(), &new_offset) ||
            internal::SubOverflow(map.offset(), new_offset, &map.offset())) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Integer overflow computing output offset for dimension ",
              output_dim, "."));
        }
        break;
      }
      case OutputIndexMethod::array: {
        // Shift the index array's base pointer so that indexing with the
        // translated input coordinates reads the same elements as before.
        auto& index_array_data = map.index_array_data();
        index_array_data.element_pointer = AddByteOffset(
            std::move(index_array_data.element_pointer),
            -IndexInnerProduct(input_rank, index_array_data.byte_strides,
                               input_offsets));
        break;
      }
      case OutputIndexMethod::constant:
        // Constant maps do not depend on input coordinates.
        break;
    }
  }
  return absl::OkStatus();
}

}  // namespace

// Applies a translate-to / translate-by / translate-backward-by operation to
// the dimensions listed in `dimensions`, shifting the input domain and
// compensating the output index maps.  An offset of kImplicit leaves the
// corresponding dimension unchanged.
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        IndexVectorOrScalarView offsets,
                                        TranslateOpKind kind,
                                        bool domain_only) {
  const DimensionIndex num_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  const auto input_domain = rep->input_domain(input_rank);
  // Per-dimension shift amounts; dimensions not being translated remain 0.
  Index input_offsets[kMaxRank];
  std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    const DimensionIndex input_dim = (*dimensions)[i];
    Index offset = offsets[i];
    if (offset == kImplicit) continue;
    const IndexInterval old_interval = input_domain[input_dim];
    IndexInterval new_interval;
    switch (kind) {
      case TranslateOpKind::kTranslateTo: {
        // Shift so the interval's origin becomes `offset`; record the
        // effective per-dimension delta.
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftIntervalTo(old_interval, offset));
        offset = new_interval.inclusive_min() - old_interval.inclusive_min();
        break;
      }
      case TranslateOpKind::kTranslateBackwardBy: {
        // Backward translation is a forward translation by the negation.
        offset = -offset;
      }
        [[fallthrough]];
      case TranslateOpKind::kTranslateBy: {
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftInterval(old_interval, offset));
        break;
      }
    }
    input_domain[input_dim] = new_interval;
    input_offsets[input_dim] = offset;
  }
  TENSORSTORE_RETURN_IF_ERROR(
      TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}

}  // namespace internal_index_space
}  // namespace tensorstore
// Unit tests for tensorstore dimension-expression translation (TranslateBy,
// TranslateBackwardBy, TranslateTo): example transforms, index arrays,
// implicit offsets, overflow/error handling, and IndexDomain application.
// NOTE(review): source is machine-collapsed onto single lines; code is kept
// byte-identical here.
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/index_domain_builder.h" #include "tensorstore/index_space/index_transform_builder.h" #include "tensorstore/index_space/internal/dim_expression_testutil.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::AllDims; using ::tensorstore::DimensionIndex; using ::tensorstore::Dims; using ::tensorstore::Index; using ::tensorstore::IndexDomainBuilder; using ::tensorstore::IndexTransformBuilder; using ::tensorstore::kImplicit; using ::tensorstore::kInfIndex; using ::tensorstore::kInfSize; using ::tensorstore::kMaxFiniteIndex; using ::tensorstore::kMinFiniteIndex; using ::tensorstore::MakeArray; using ::tensorstore::MatchesStatus; using ::tensorstore::span; using ::tensorstore::internal_index_space::EquivalentIndices; using ::tensorstore::internal_index_space::TestDimExpression; TEST(TranslateByTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() .input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({11, 2, 23}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, -10, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -20, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {12, 3, 23}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims("x", "z").TranslateBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateBackwardByTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() 
.input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({-9, 2, -17}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, 10, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, 20, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {-8, 3, -17}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateBackwardBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims("x", "z").TranslateBackwardBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateToTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() .input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({10, 2, 20}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, -9, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -17, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {11, 3, 20}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateTo({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims(0, 2).TranslateTo({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateByTest, OneDimensionalConstant) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_constant(0, 2) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), 
IndexTransformBuilder<1, 1>() .output_constant(0, 2) .Finalize() .value(), {{{4}, {9}}}); } TEST(TranslateByTest, OneDimensionalSingleInputDimension) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2 - 3 * 5, 3, 0) .Finalize() .value(), {{{4}, {9}}}); } TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateBy(kImplicit), {0}, IndexTransformBuilder<1, 1>() .output_identity_transform() .Finalize() .value(), IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), {{{4}, {4}}}); } TEST(TranslateByTest, OneDimensionalIndexArray) { TestDimExpression( IndexTransformBuilder<1, 1>() .input_origin({-2}) .input_shape({5}) .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10})) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .input_origin({3}) .input_shape({5}) .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({3}) .input_shape({5}) .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10})) .Finalize() .value(), {{{1}, {6}}}); } TEST(TranslateByTest, AllDimsUniform) { TestDimExpression( IndexTransformBuilder<3, 5>() .input_origin({-kInfIndex, 5, -kInfIndex}) .input_shape({kInfSize, 30, kInfIndex + 10}) .output_single_input_dimension(0, 1, 4, 0) .output_single_input_dimension(1, 2, 5, 0) .output_constant(2, 3) .output_single_input_dimension(3, 4, 7, 1) .output_single_input_dimension(4, 5, 8, 2) .Finalize() .value(), AllDims().TranslateBy(5), {0, 1, 2}, IndexTransformBuilder<3, 3>() .input_origin({-kInfIndex, 10, -kInfIndex}) 
.input_shape({kInfSize, 30, kInfIndex + 15}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, -5, 1, 1) .output_single_input_dimension(2, -5, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 5>() .input_origin({-kInfIndex, 10, -kInfIndex}) .input_shape({kInfSize, 30, kInfIndex + 15}) .output_single_input_dimension(0, 1 - 4 * 5, 4, 0) .output_single_input_dimension(1, 2 - 5 * 5, 5, 0) .output_constant(2, 3) .output_single_input_dimension(3, 4 - 7 * 5, 7, 1) .output_single_input_dimension(4, 5 - 8 * 5, 8, 2) .Finalize() .value(), {{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}}); } TEST(TranslateByTest, ErrorHandling) { TestDimExpressionError( IndexTransformBuilder<1, 1>().Finalize().value(), AllDims().TranslateBy(span<const Index>({1, 2})), absl::StatusCode::kInvalidArgument, "Number of dimensions \\(1\\) does not match number of " "indices \\(2\\)"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMinFiniteIndex}) .input_shape({10}) .Finalize() .value(), AllDims().TranslateBy(-kInfIndex), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMinFiniteIndex}) .input_shape({10}) .Finalize() .value(), AllDims().TranslateBy(-1), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMaxFiniteIndex - 1}) .input_shape({2}) .Finalize() .value(), AllDims().TranslateBy(1), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .output_single_input_dimension( 0, std::numeric_limits<Index>::min(), 1, 0) .Finalize() .value(), AllDims().TranslateBy(1), absl::StatusCode::kInvalidArgument, "Integer overflow computing output offset .*"); } TEST(TranslateByTest, DimSubsetUniform) { TestDimExpression(IndexTransformBuilder<3, 2>() .input_origin({1, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7}) 
.output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2, 2, 2) .Finalize() .value(), Dims(0, 2).TranslateBy(5), {0, 2}, IndexTransformBuilder<3, 3>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 5}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -5, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 2>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 5}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2 - 2 * 5, 2, 2) .Finalize() .value(), {{{4, 5, 6}, {4 + 5, 5, 6 + 5}}}); } TEST(TranslateByTest, DimSubsetNonUniform) { TestDimExpression(IndexTransformBuilder<3, 2>() .input_origin({1, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2, 2, 2) .Finalize() .value(), Dims(0, 2).TranslateBy({5, 6}), {0, 2}, IndexTransformBuilder<3, 3>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 6}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -6, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 2>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 6}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2 - 2 * 6, 2, 2) .Finalize() .value(), {{{3, 4, 5}, {3 + 5, 4, 5 + 6}}}); } TEST(TranslateToTest, OneDimensionalConstant) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_constant(0, 2) .Finalize() .value(), AllDims().TranslateTo(8), {0}, IndexTransformBuilder<1, 1>() .input_origin({8}) .input_shape({10}) .output_single_input_dimension(0, -3, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({8}) .input_shape({10}) .output_constant(0, 2) .Finalize() .value(), {{{7}, {10}}}); } TEST(TranslateToTest, 
OneDimensionalSingleInputDimension) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateTo(5), {0}, IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_single_input_dimension(0, -1, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_single_input_dimension(0, 2 - 3, 3, 0) .Finalize() .value(), {{{6}, {7}}}); } TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateTo(kImplicit), {0}, IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), {{{6}, {6}}}); } TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) { TestDimExpression(IndexTransformBuilder<2, 2>() .input_origin({4, 5}) .input_shape({10, 11}) .output_single_input_dimension(0, 2, 3, 0) .output_single_input_dimension(1, 4, 5, 1) .Finalize() .value(), AllDims().TranslateTo({kImplicit, 10}), {0, 1}, IndexTransformBuilder<2, 2>() .input_origin({4, 10}) .input_shape({10, 11}) .output_single_input_dimension(0, 0) .output_single_input_dimension(1, -5, 1, 1) .Finalize() .value(), IndexTransformBuilder<2, 2>() .input_origin({4, 10}) .input_shape({10, 11}) .output_single_input_dimension(0, 2, 3, 0) .output_single_input_dimension(1, -25 + 4, 5, 1) .Finalize() .value(), {{{6, 7}, {6, 12}}}); } TEST(TranslateToTest, ErrorHandling) { TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(), AllDims().TranslateTo(1), absl::StatusCode::kInvalidArgument, "Interval \\(-inf, \\+inf\\) is not bounded below"); 
TestDimExpressionError( IndexTransformBuilder<1, 1>() .input_origin({-5}) .input_shape({10}) .Finalize() .value(), AllDims().TranslateTo(std::numeric_limits<Index>::max()), absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*"); } TEST(TranslateToTest, IndexDomain) { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto domain, IndexDomainBuilder<3>().origin({1, 2, 3}).shape({6, 7, 8}).Finalize()); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto translated_domain, IndexDomainBuilder<3>().origin({4, 5, 6}).shape({6, 7, 8}).Finalize()); EXPECT_THAT(domain | AllDims().TranslateTo({4, 5, 6}), ::testing::Optional(translated_domain)); } TEST(TranslateToTest, IndexDomainOverflow) { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto transform, IndexTransformBuilder(1, 1) .input_shape({10}) .output_single_input_dimension(0, kMaxFiniteIndex, kMaxFiniteIndex, 0) .Finalize()); auto domain = transform.domain(); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto translated_domain, IndexDomainBuilder(1).origin({-5}).shape({10}).Finalize()); EXPECT_THAT(transform | AllDims().TranslateTo({-5}), MatchesStatus(absl::StatusCode::kInvalidArgument)); EXPECT_THAT(domain | AllDims().TranslateTo({-5}), ::testing::Optional(translated_domain)); } }
// Applies a translate-to / translate-by / translate-backward-by operation to
// the dimensions listed in `dimensions`, shifting the input domain and
// compensating the output index maps.  An offset of kImplicit leaves the
// corresponding dimension unchanged.
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        IndexVectorOrScalarView offsets,
                                        TranslateOpKind kind,
                                        bool domain_only) {
  const DimensionIndex num_dims = dimensions->size();
  const DimensionIndex input_rank = transform.input_rank();
  TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
  TransformRep::Ptr<> rep = MutableRep(
      TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
  const auto input_domain = rep->input_domain(input_rank);
  // Per-dimension shift amounts; dimensions not being translated remain 0.
  Index input_offsets[kMaxRank];
  std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    const DimensionIndex input_dim = (*dimensions)[i];
    Index offset = offsets[i];
    if (offset == kImplicit) continue;
    const IndexInterval old_interval = input_domain[input_dim];
    IndexInterval new_interval;
    switch (kind) {
      case TranslateOpKind::kTranslateTo: {
        // Shift so the interval's origin becomes `offset`; record the
        // effective per-dimension delta.
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftIntervalTo(old_interval, offset));
        offset = new_interval.inclusive_min() - old_interval.inclusive_min();
        break;
      }
      case TranslateOpKind::kTranslateBackwardBy: {
        // Backward translation is a forward translation by the negation.
        offset = -offset;
      }
        [[fallthrough]];
      case TranslateOpKind::kTranslateBy: {
        TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
                                     ShiftInterval(old_interval, offset));
        break;
      }
    }
    input_domain[input_dim] = new_interval;
    input_offsets[input_dim] = offset;
  }
  TENSORSTORE_RETURN_IF_ERROR(
      TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
  internal_index_space::DebugCheckInvariants(rep.get());
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
TEST(TranslateByTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() .input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({11, 2, 23}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, -10, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -20, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {12, 3, 23}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims("x", "z").TranslateBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateBackwardByTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() .input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({-9, 2, -17}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, 10, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, 20, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {-8, 3, -17}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateBackwardBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims("x", "z").TranslateBackwardBy({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateToTest, Example) { const auto original_transform = IndexTransformBuilder<3, 3>() .input_origin({1, 2, 3}) .input_shape({3, 4, 2}) .input_labels({"x", "y", 
"z"}) .output_identity_transform() .Finalize() .value(); const auto expected_new_transform = IndexTransformBuilder<3, 3>() .input_origin({10, 2, 20}) .input_shape({3, 4, 2}) .input_labels({"x", "y", "z"}) .output_single_input_dimension(0, -9, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -17, 1, 2) .Finalize() .value(); const EquivalentIndices equivalent_indices = { {{2, 3, 3}, {11, 3, 20}}, }; TestDimExpression(original_transform, Dims(0, 2).TranslateTo({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); TestDimExpression(original_transform, Dims(0, 2).TranslateTo({10, 20}), {0, 2}, expected_new_transform, expected_new_transform, equivalent_indices); } TEST(TranslateByTest, OneDimensionalConstant) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_constant(0, 2) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .output_constant(0, 2) .Finalize() .value(), {{{4}, {9}}}); } TEST(TranslateByTest, OneDimensionalSingleInputDimension) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2 - 3 * 5, 3, 0) .Finalize() .value(), {{{4}, {9}}}); } TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) { TestDimExpression( IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateBy(kImplicit), {0}, IndexTransformBuilder<1, 1>() .output_identity_transform() .Finalize() .value(), IndexTransformBuilder<1, 1>() .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), {{{4}, {4}}}); } TEST(TranslateByTest, OneDimensionalIndexArray) { TestDimExpression( 
IndexTransformBuilder<1, 1>() .input_origin({-2}) .input_shape({5}) .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10})) .Finalize() .value(), AllDims().TranslateBy(5), {0}, IndexTransformBuilder<1, 1>() .input_origin({3}) .input_shape({5}) .output_single_input_dimension(0, -5, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({3}) .input_shape({5}) .output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10})) .Finalize() .value(), {{{1}, {6}}}); } TEST(TranslateByTest, AllDimsUniform) { TestDimExpression( IndexTransformBuilder<3, 5>() .input_origin({-kInfIndex, 5, -kInfIndex}) .input_shape({kInfSize, 30, kInfIndex + 10}) .output_single_input_dimension(0, 1, 4, 0) .output_single_input_dimension(1, 2, 5, 0) .output_constant(2, 3) .output_single_input_dimension(3, 4, 7, 1) .output_single_input_dimension(4, 5, 8, 2) .Finalize() .value(), AllDims().TranslateBy(5), {0, 1, 2}, IndexTransformBuilder<3, 3>() .input_origin({-kInfIndex, 10, -kInfIndex}) .input_shape({kInfSize, 30, kInfIndex + 15}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, -5, 1, 1) .output_single_input_dimension(2, -5, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 5>() .input_origin({-kInfIndex, 10, -kInfIndex}) .input_shape({kInfSize, 30, kInfIndex + 15}) .output_single_input_dimension(0, 1 - 4 * 5, 4, 0) .output_single_input_dimension(1, 2 - 5 * 5, 5, 0) .output_constant(2, 3) .output_single_input_dimension(3, 4 - 7 * 5, 7, 1) .output_single_input_dimension(4, 5 - 8 * 5, 8, 2) .Finalize() .value(), {{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}}); } TEST(TranslateByTest, ErrorHandling) { TestDimExpressionError( IndexTransformBuilder<1, 1>().Finalize().value(), AllDims().TranslateBy(span<const Index>({1, 2})), absl::StatusCode::kInvalidArgument, "Number of dimensions \\(1\\) does not match number of " "indices \\(2\\)"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMinFiniteIndex}) .input_shape({10}) .Finalize() 
.value(), AllDims().TranslateBy(-kInfIndex), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMinFiniteIndex}) .input_shape({10}) .Finalize() .value(), AllDims().TranslateBy(-1), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .input_origin({kMaxFiniteIndex - 1}) .input_shape({2}) .Finalize() .value(), AllDims().TranslateBy(1), absl::StatusCode::kInvalidArgument, ".* is outside valid range .*"); TestDimExpressionError(IndexTransformBuilder<1, 1>() .output_single_input_dimension( 0, std::numeric_limits<Index>::min(), 1, 0) .Finalize() .value(), AllDims().TranslateBy(1), absl::StatusCode::kInvalidArgument, "Integer overflow computing output offset .*"); } TEST(TranslateByTest, DimSubsetUniform) { TestDimExpression(IndexTransformBuilder<3, 2>() .input_origin({1, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2, 2, 2) .Finalize() .value(), Dims(0, 2).TranslateBy(5), {0, 2}, IndexTransformBuilder<3, 3>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 5}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -5, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 2>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 5}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2 - 2 * 5, 2, 2) .Finalize() .value(), {{{4, 5, 6}, {4 + 5, 5, 6 + 5}}}); } TEST(TranslateByTest, DimSubsetNonUniform) { TestDimExpression(IndexTransformBuilder<3, 2>() .input_origin({1, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2, 2, 2) .Finalize() .value(), Dims(0, 2).TranslateBy({5, 6}), {0, 2}, IndexTransformBuilder<3, 3>() .input_origin({6, 2, 
-kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 6}) .output_single_input_dimension(0, -5, 1, 0) .output_single_input_dimension(1, 1) .output_single_input_dimension(2, -6, 1, 2) .Finalize() .value(), IndexTransformBuilder<3, 2>() .input_origin({6, 2, -kInfIndex}) .input_shape({4, 5, kInfIndex + 7 + 6}) .output_single_input_dimension(0, 1, 1, 1) .output_single_input_dimension(1, 2 - 2 * 6, 2, 2) .Finalize() .value(), {{{3, 4, 5}, {3 + 5, 4, 5 + 6}}}); } TEST(TranslateToTest, OneDimensionalConstant) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_constant(0, 2) .Finalize() .value(), AllDims().TranslateTo(8), {0}, IndexTransformBuilder<1, 1>() .input_origin({8}) .input_shape({10}) .output_single_input_dimension(0, -3, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({8}) .input_shape({10}) .output_constant(0, 2) .Finalize() .value(), {{{7}, {10}}}); } TEST(TranslateToTest, OneDimensionalSingleInputDimension) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateTo(5), {0}, IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_single_input_dimension(0, -1, 1, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({5}) .input_shape({10}) .output_single_input_dimension(0, 2 - 3, 3, 0) .Finalize() .value(), {{{6}, {7}}}); } TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) { TestDimExpression(IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), AllDims().TranslateTo(kImplicit), {0}, IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 0) .Finalize() .value(), IndexTransformBuilder<1, 1>() .input_origin({4}) .input_shape({10}) .output_single_input_dimension(0, 2, 3, 0) .Finalize() .value(), {{{6}, {6}}}); } 
// Translating to {kImplicit, 10} leaves dimension 0 untouched and moves
// dimension 1's origin from 5 to 10 (shift of +5); the composed output map
// for dim 1 therefore gains an offset of -5*5 (scale 5) on top of the
// original offset 4, i.e. -25 + 4.
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
  TestDimExpression(IndexTransformBuilder<2, 2>()
                        .input_origin({4, 5})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, 4, 5, 1)
                        .Finalize()
                        .value(),
                    AllDims().TranslateTo({kImplicit, 10}),
                    /*expected_new_dimension_selection=*/{0, 1},
                    // Expected index-space transform: dim 0 identity, dim 1
                    // shifted back by 5 to recover original coordinates.
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 0)
                        .output_single_input_dimension(1, -5, 1, 1)
                        .Finalize()
                        .value(),
                    // Expected composed transform on the translated domain.
                    IndexTransformBuilder<2, 2>()
                        .input_origin({4, 10})
                        .input_shape({10, 11})
                        .output_single_input_dimension(0, 2, 3, 0)
                        .output_single_input_dimension(1, -25 + 4, 5, 1)
                        .Finalize()
                        .value(),
                    /*equivalent_indices=*/{{{6, 7}, {6, 12}}});
}

// Error cases: TranslateTo requires a lower-bounded domain, and the
// resulting origin must stay within the representable index range.
TEST(TranslateToTest, ErrorHandling) {
  // Unbounded-below domain cannot be translated to a fixed origin.
  TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
                         AllDims().TranslateTo(1),
                         absl::StatusCode::kInvalidArgument,
                         "Interval \\(-inf, \\+inf\\) is not bounded below");
  // Translating the origin to Index::max overflows the valid index range.
  TestDimExpressionError(
      IndexTransformBuilder<1, 1>()
          .input_origin({-5})
          .input_shape({10})
          .Finalize()
          .value(),
      AllDims().TranslateTo(std::numeric_limits<Index>::max()),
      absl::StatusCode::kOutOfRange,
      "Origin [0-9]+ is outside valid range .*");
}
#ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_

#include <stddef.h>

#include <atomic>
#include <string>
#include <string_view>
#include <utility>

#include "tsl/platform/macros.h"
#include "tsl/profiler/lib/nvtx_utils.h"

#if !defined(IS_MOBILE_PLATFORM)
#include "tsl/profiler/backends/cpu/annotation_stack.h"
#endif

namespace tsl::profiler {

// Pushes an annotation (a profiler range name) produced by `generator`.
//
// `generator` is a nullary callable; it is only invoked when some profiling
// sink is active, so callers can defer potentially expensive string
// construction by passing a lambda. Dispatch order:
//   1. If a profiler domain is active (NVTX — see nvtx_utils.h), push an
//      NVTX range on it.
//   2. Otherwise, on non-mobile builds, if the CPU AnnotationStack is
//      enabled, push onto it (the result must convert to std::string_view).
// Both paths are wrapped in TF_PREDICT_FALSE: the common case is that
// profiling is off and this function does nothing.
template <typename T>
void PushAnnotation(const T& generator) {
  if (auto domain = DefaultProfilerDomain();
      TF_PREDICT_FALSE(domain != nullptr)) {
    RangePush(domain, generator());
    return;
  }

#if !defined(IS_MOBILE_PLATFORM)
  if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
    AnnotationStack::PushAnnotation(static_cast<std::string_view>(generator()));
  }
#endif
}

// Convenience overload: annotate with a C-string literal/name.
inline void PushAnnotation(const char* name) {
  PushAnnotation([&] { return name; });
}
// Convenience overload: annotate with an existing std::string.
inline void PushAnnotation(const std::string& name) {
  PushAnnotation([&] { return name; });
}

// Pops the most recently pushed annotation, mirroring PushAnnotation's
// dispatch (NVTX domain first, then the CPU AnnotationStack).
inline void PopAnnotation() {
  // NOTE(review): acquire fence — presumably orders this thread's reads of
  // the profiler-enabled state against the preceding push; confirm the
  // intended pairing before relying on it.
  std::atomic_thread_fence(std::memory_order_acquire);

  if (auto domain = DefaultProfilerDomain();
      TF_PREDICT_FALSE(domain != nullptr)) {
    RangePop(domain);
    return;
  }

#if !defined(IS_MOBILE_PLATFORM)
  if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
    AnnotationStack::PopAnnotation();
  }
#endif
}

// RAII wrapper: pushes an annotation on construction and pops it on
// destruction, so the annotated range exactly covers the enclosing scope.
// Non-copyable; the annotation argument may be a string or a nullary
// callable (see PushAnnotation overloads).
class ScopedAnnotation {
 public:
  template <typename T>
  explicit ScopedAnnotation(T&& annotation) {
    PushAnnotation(std::forward<T>(annotation));
  }

  ~ScopedAnnotation() { PopAnnotation(); }

  // True iff the CPU annotation stack is collecting (always false on
  // mobile builds). Note this does not reflect NVTX-domain activity.
  static bool IsEnabled() {
#if !defined(IS_MOBILE_PLATFORM)
    return AnnotationStack::IsEnabled();
#else
    return false;
#endif
  }

 private:
  ScopedAnnotation(const ScopedAnnotation&) = delete;
  ScopedAnnotation& operator=(const ScopedAnnotation&) = delete;
};

}  // namespace tsl::profiler

#endif  // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#include "tsl/profiler/lib/scoped_annotation.h"

#include <string>

#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/profiler/backends/cpu/annotation_stack.h"

namespace tsl {
namespace profiler {
namespace {

// Verifies ScopedAnnotation / Push/PopAnnotation against the CPU
// AnnotationStack: no-op while disabled, "::"-joined nesting while enabled,
// and an empty stack once every scope/pop has unwound.
TEST(ScopedAnnotation, Simple) {
  {
    // Disabled: annotations are dropped.
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "");
  }
  {
    // Enabled: a single annotation is visible while in scope.
    AnnotationStack::Enable(true);
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "blah");
    AnnotationStack::Enable(false);
  }
  {
    // Nested scopes are joined with "::".
    AnnotationStack::Enable(true);
    ScopedAnnotation outer("foo");
    ScopedAnnotation inner("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    AnnotationStack::Enable(false);
  }
  {
    // The free Push/Pop functions behave like the RAII wrapper.
    AnnotationStack::Enable(true);
    PushAnnotation("foo");
    PushAnnotation("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    PopAnnotation();
    PopAnnotation();
    AnnotationStack::Enable(false);
  }
  // Everything popped: the stack is empty again.
  EXPECT_EQ(AnnotationStack::Get(), "");
}

// Deterministic "random" payload: a run of 'a's of the requested length,
// so benchmark cost depends only on length, not content.
std::string GenerateRandomString(int length) {
  return std::string(length, 'a');
}

// Cost of constructing/destroying a ScopedAnnotation while profiling is off
// (should be near zero), for several annotation lengths.
void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
  }
}

BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);

// Same as above with the annotation stack enabled: measures push/pop cost.
void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  AnnotationStack::Enable(true);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
  }
  AnnotationStack::Enable(false);
}

BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);

// Two nested annotations per iteration, profiling enabled.
void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
  const int annotation_size = state.range(0);
  std::string annotation = GenerateRandomString(annotation_size);
  AnnotationStack::Enable(true);
  for (auto s : state) {
    ScopedAnnotation trace(annotation);
    { ScopedAnnotation trace(annotation); }
  }
  AnnotationStack::Enable(false);
}

BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);

// Annotation string built eagerly on every iteration (StrCat cost is paid
// whether or not the sink uses it).
void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
  AnnotationStack::Enable(true);
  int i = 0;
  for (auto s : state) {
    // generate the annotation on the fly.
    ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
    ++i;
  }
  AnnotationStack::Enable(false);
}

BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);

// Lambda form while disabled: the generator should never be invoked, so
// this measures the pure dispatch overhead of the lazy path.
void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
  int i = 0;
  for (auto s : state) {
    ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
    ++i;
  }
}

BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);

// Lambda form while enabled: generator runs once per iteration.
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
    ::testing::benchmark::State& state) {
  AnnotationStack::Enable(true);
  int i = 0;
  for (auto s : state) {
    ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
    ++i;
  }
  AnnotationStack::Enable(false);
}

BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);

}  // namespace
}  // namespace profiler
}  // namespace tsl
inline void PopAnnotation() { std::atomic_thread_fence(std::memory_order_acquire); if (auto domain = DefaultProfilerDomain(); TF_PREDICT_FALSE(domain != nullptr)) { RangePop(domain); return; } #if !defined(IS_MOBILE_PLATFORM) if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { AnnotationStack::PopAnnotation(); } #endif }
#include "tsl/profiler/lib/scoped_annotation.h"

#include <string>

#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/profiler/backends/cpu/annotation_stack.h"

namespace tsl {
namespace profiler {
namespace {

// Verifies ScopedAnnotation / Push/PopAnnotation against the CPU
// AnnotationStack: no-op while disabled, "::"-joined nesting while enabled,
// and an empty stack after everything unwinds.
TEST(ScopedAnnotation, Simple) {
  {
    // Disabled: annotations are dropped.
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "");
  }
  {
    // Enabled: a single annotation is visible while in scope.
    AnnotationStack::Enable(true);
    ScopedAnnotation trace("blah");
    EXPECT_EQ(AnnotationStack::Get(), "blah");
    AnnotationStack::Enable(false);
  }
  {
    // Nested scopes are joined with "::".
    AnnotationStack::Enable(true);
    ScopedAnnotation outer("foo");
    ScopedAnnotation inner("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    AnnotationStack::Enable(false);
  }
  {
    // The free Push/Pop functions behave like the RAII wrapper.
    AnnotationStack::Enable(true);
    PushAnnotation("foo");
    PushAnnotation("bar");
    EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
    PopAnnotation();
    PopAnnotation();
    AnnotationStack::Enable(false);
  }
  // Everything popped: the stack is empty again.
  EXPECT_EQ(AnnotationStack::Get(), "");
}
#include "xla/service/hlo_value_semantics_analysis.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <memory> #include <numeric> #include <optional> #include <string> #include <string_view> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_value.h" #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" #include "xla/side_effect_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { SendRecvGroupMap::SendRecvGroupMap(const HloModule& hlo_module) { for (HloComputation* computation : hlo_module.computations()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() != HloOpcode::kSend && instruction->opcode() != HloOpcode::kRecv) { continue; } std::string rendezvous = instruction->frontend_attributes().map().at( kXlaHostTransferRendezvousNameAttr); auto send_recv_iter = host_transfer_rendezvous_map_.find(rendezvous); if (send_recv_iter == host_transfer_rendezvous_map_.end()) { auto insert_success = host_transfer_rendezvous_map_.insert( {rendezvous, SendRecvGroup{nullptr, nullptr}}); send_recv_iter = insert_success.first; } if (instruction->opcode() == HloOpcode::kSend) { send_recv_iter->second.send = instruction; } else { send_recv_iter->second.recv = instruction; } } } } absl::StatusOr<HloInstruction*> SendRecvGroupMap::GetMatchingSendOrRecv( 
HloInstruction* send_or_recv) const { if (send_or_recv->opcode() != HloOpcode::kSend && send_or_recv->opcode() != HloOpcode::kRecv) { return InvalidArgument("Expecting only send or recv"); } std::string rendezvous = send_or_recv->frontend_attributes().map().at( kXlaHostTransferRendezvousNameAttr); auto send_recv_iter = host_transfer_rendezvous_map_.find(rendezvous); if (send_recv_iter == host_transfer_rendezvous_map_.end()) { return Internal("Missing send or recv from send recv group."); } if (send_or_recv->opcode() == HloOpcode::kSend) { return send_recv_iter->second.recv; } return send_recv_iter->second.send; } bool HloPreOrderDFS::IsReady(const HloInstruction* instruction) const { for (HloInstruction* user : instruction->users()) { if (!visited_.contains(user)) { return false; } } return true; } namespace { std::vector<HloInstruction*> GetAllInstructionsWithZeroUsers( const HloComputation& computation) { std::vector<HloInstruction*> results; for (HloInstruction* instruction : computation.instructions()) { if (instruction->users().empty()) { results.push_back(instruction); } } return results; } } absl::Status HloPreOrderDFS::Run(const HloComputation& computation, DfsHloVisitorBase<HloInstruction*>* visitor) { stack_.clear(); visited_.clear(); std::vector<HloInstruction*> roots = GetAllInstructionsWithZeroUsers(computation); for (HloInstruction* root : roots) { stack_.push_back(root); } while (!stack_.empty()) { HloInstruction* to_visit = stack_.back(); stack_.pop_back(); if (visited_.contains(to_visit)) { continue; } visited_.insert(to_visit); for (HloInstruction* operand : to_visit->mutable_operands()) { if (IsReady(operand)) { stack_.push_back(operand); } } TF_RETURN_IF_ERROR(visitor->Preprocess(to_visit)); TF_RETURN_IF_ERROR(to_visit->Visit(visitor)); TF_RETURN_IF_ERROR(visitor->Postprocess(to_visit)); } return absl::OkStatus(); } namespace { template <typename T> std::string ToString(T element) { return absl::StrCat(element); } template <> std::string 
ToString(const HloValueSemantics* element) { return element->ToString(); } template <typename T> std::string ToString(const ShapeTree<T>& tree) { std::string str; tree.ForEachElement([&str, &tree](const ShapeIndex& shape_index, T element) { auto subshape = ShapeUtil::GetSubshape(tree.shape(), (shape_index)); absl::StrAppend(&str, shape_index.ToString(), ", ", subshape.ToString(), ": ", ToString(element), "\n"); }); return str; } } absl::Status EinsumDepthAnalysis::RunInternal( const HloComputation& computation, const std::optional<ShapeTree<int>>& root_depth) { std::vector<HloInstruction*> roots = GetAllInstructionsWithZeroUsers(computation); for (HloInstruction* root : roots) { if (root == computation.root_instruction()) { if (root_depth.has_value()) { TF_RETURN_IF_ERROR(SetInstructionDepth(root, *root_depth)); } else { TF_RETURN_IF_ERROR(SetInstructionDepth(root, 0)); } } else { GetOrCreateDepthTree(root); } } HloPreOrderDFS dfs; return dfs.Run(computation, this); } absl::StatusOr<std::unique_ptr<EinsumDepthAnalysis>> EinsumDepthAnalysis::Run( const HloComputation& computation, const SendRecvGroupMap& send_recv_group_map) { EinsumDepthAnalysis* analysis_ptr = new EinsumDepthAnalysis(send_recv_group_map); std::unique_ptr<EinsumDepthAnalysis> analysis(analysis_ptr); TF_RETURN_IF_ERROR(analysis->RunInternal(computation, std::nullopt)); return analysis; } namespace { int MergeDepth(int original_depth, int new_depth) { if (new_depth >= 0) { return std::max(original_depth, new_depth); } if (new_depth < 0 && original_depth < 0) { return std::min(original_depth, new_depth); } return original_depth; } void SetDepth(ShapeTree<int>& depth_tree, int depth) { depth_tree.ForEachMutableElement( [depth, &depth_tree](const ShapeIndex& shape_index, int* depth_ptr) { if (depth_tree.IsLeaf(shape_index)) { *depth_ptr = MergeDepth(*depth_ptr, depth); } }); } void SetDepth(ShapeTree<int>& depth_tree, const ShapeTree<int>& source) { depth_tree.ForEachMutableElement( [&depth_tree, 
&source](const ShapeIndex& shape_index, int* depth_ptr) { if (depth_tree.IsLeaf(shape_index)) { *depth_ptr = MergeDepth(*depth_ptr, source.element(shape_index)); } }); } int GetMaxDepth(const ShapeTree<int>& depth_tree) { int max_depth = -1; depth_tree.ForEachElement( [&max_depth](const ShapeIndex& shape_index, int depth) { max_depth = std::max(max_depth, depth); return absl::OkStatus(); }); if (max_depth >= 0) { return max_depth; } depth_tree.ForEachElement( [&max_depth](const ShapeIndex& shape_index, int depth) { max_depth = std::min(max_depth, depth); return absl::OkStatus(); }); return max_depth; } void SetDepthFromTupleDepth(ShapeTree<int>& depth_tree, const ShapeTree<int>& tuple_depth_tree, int tuple_index) { depth_tree.ForEachMutableElement( [&depth_tree, &tuple_depth_tree, tuple_index]( const ShapeIndex& shape_index, int* depth_ptr) { if (depth_tree.IsLeaf(shape_index)) { ShapeIndex output_index = shape_index; output_index.push_front(tuple_index); *depth_ptr = MergeDepth(*depth_ptr, tuple_depth_tree.element(output_index)); } }); } } ShapeTree<int>& EinsumDepthAnalysis::GetOrCreateDepthTree( const HloInstruction* instruction) { auto depth_iter = einsum_depth_map_.find(instruction); if (depth_iter == einsum_depth_map_.end()) { ShapeTree<int> depth_tree(instruction->shape(), -1); auto inserted = einsum_depth_map_.insert( std::make_pair(instruction, std::move(depth_tree))); depth_iter = inserted.first; } return depth_iter->second; } ShapeTree<int>& EinsumDepthAnalysis::GetDepthTreeOrDie( const HloInstruction* instruction) { auto depth_iter = einsum_depth_map_.find(instruction); CHECK(depth_iter != einsum_depth_map_.end()) << "No depth tree found for instruction: " << instruction->ToString(); return depth_iter->second; } absl::Status EinsumDepthAnalysis::SetInstructionDepth( const HloInstruction* instruction, int depth) { ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction); SetDepth(depth_tree, depth); return absl::OkStatus(); } absl::Status 
EinsumDepthAnalysis::SetInstructionDepth( const HloInstruction* instruction, const ShapeTree<int>& depth) { ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction); SetDepth(depth_tree, depth); return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::SetInstructionDepthFromTupleDepth( const HloInstruction* instruction, const ShapeTree<int>& tuple_depth_tree, int tuple_index) { ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction); SetDepthFromTupleDepth(depth_tree, tuple_depth_tree, tuple_index); return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::DefaultAction(HloInstruction* instruction) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction); int max_depth = GetMaxDepth(depth_tree); for (int operand_index = 0; operand_index < instruction->operand_count(); ++operand_index) { const HloInstruction* operand = instruction->operand(operand_index); TF_RETURN_IF_ERROR(SetInstructionDepth(operand, max_depth)); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleTuple(HloInstruction* tuple) { return HandleTupleLike(tuple); } absl::Status EinsumDepthAnalysis::HandleAllReduce(HloInstruction* all_reduce) { if (all_reduce->shape().IsArray()) { return DefaultAction(all_reduce); } return HandleTupleLike(all_reduce); } absl::Status EinsumDepthAnalysis::HandleTupleLike(HloInstruction* tuple_like) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(tuple_like); for (int operand_index = 0; operand_index < tuple_like->operand_count(); ++operand_index) { HloInstruction* operand = tuple_like->mutable_operand(operand_index); ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand); SetDepthFromTupleDepth(operand_depth, depth_tree, operand_index); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleGetTupleElement( HloInstruction* get_tuple_element) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(get_tuple_element); HloInstruction* operand = get_tuple_element->mutable_operand(0); int tuple_index = 
get_tuple_element->tuple_index(); ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand); operand_depth.ForEachMutableElement( [&operand_depth, &depth_tree, tuple_index](const ShapeIndex& shape_index, int* depth_ptr) { if (shape_index.empty() || shape_index.front() != tuple_index) { return; } if (operand_depth.IsLeaf(shape_index)) { ShapeIndex output_index = shape_index; output_index.pop_front(); *depth_ptr = MergeDepth(*depth_ptr, depth_tree.element(output_index)); } }); return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleDepthIncrementInstruction( HloInstruction* instruction) { ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction); int instruction_depth = depth_tree.element({}); for (HloInstruction* operand : instruction->mutable_operands()) { TF_RETURN_IF_ERROR(SetInstructionDepth( operand, instruction_depth >= 0 ? instruction_depth + 1 : instruction_depth - 1)); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleDot(HloInstruction* dot) { return HandleDepthIncrementInstruction(dot); } absl::Status EinsumDepthAnalysis::HandleConvolution( HloInstruction* convolution) { return HandleDepthIncrementInstruction(convolution); } absl::Status EinsumDepthAnalysis::HandleCall(HloInstruction* call) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(call); return HandleCalledComputation(*call->called_computations()[0], depth_tree, call->operands()); } absl::Status EinsumDepthAnalysis::HandleFusion(HloInstruction* fusion) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(fusion); return HandleCalledComputation(*fusion->called_computations()[0], depth_tree, fusion->operands()); } absl::Status EinsumDepthAnalysis::HandleWhile(HloInstruction* xla_while) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(xla_while); int max_depth = GetMaxDepth(depth_tree); HloComputation* condition_computation = xla_while->while_condition(); HloInstruction* condition_root = condition_computation->root_instruction(); ShapeTree<int> 
condition_depth(condition_root->shape(), max_depth); TF_RETURN_IF_ERROR(HandleCalledComputation( *condition_computation, condition_depth, xla_while->operands())); const ShapeTree<int>* root_depth_ptr = &depth_tree; HloComputation* body_computation = xla_while->while_body(); bool run_depth_propagation_on_body = true; ShapeTree<int>& root_depth = GetOrCreateDepthTree(body_computation->root_instruction()); while (run_depth_propagation_on_body) { run_depth_propagation_on_body = false; TF_RETURN_IF_ERROR(HandleCalledComputation( *body_computation, *root_depth_ptr, xla_while->operands())); HloInstruction* operand = body_computation->parameter_instruction(0); const ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand); root_depth.ForEachMutableElement( [&run_depth_propagation_on_body, &root_depth, &operand_depth]( const ShapeIndex& shape_index, int* depth_ptr) { if (!root_depth.IsLeaf(shape_index)) { return; } if (root_depth.element(shape_index) < 0 && operand_depth.element(shape_index) >= 0) { *depth_ptr = operand_depth.element(shape_index); run_depth_propagation_on_body = true; } }); root_depth_ptr = &root_depth; } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleConditional( HloInstruction* conditional) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(conditional); TF_RETURN_IF_ERROR( SetInstructionDepth(conditional->operands()[0], depth_tree)); for (int i = 0; i < conditional->branch_count(); ++i) { TF_RETURN_IF_ERROR( HandleCalledComputation(*conditional->called_computations()[i], depth_tree, {conditional->operands()[i + 1]})); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleCalledComputation( const HloComputation& called_computation, const ShapeTree<int>& root_depth, absl::Span<HloInstruction* const> operands) { TF_RETURN_IF_ERROR(RunInternal(called_computation, std::optional<ShapeTree<int>>(root_depth))); for (int i = 0; i < operands.size(); ++i) { HloInstruction* operand = operands[i]; HloInstruction* parameter 
= called_computation.parameter_instruction(i); const ShapeTree<int>& parameter_depth = GetOrCreateDepthTree(parameter); TF_RETURN_IF_ERROR(SetInstructionDepth(operand, parameter_depth)); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleAfterAll(HloInstruction* after_all) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(after_all); int max_depth = GetMaxDepth(depth_tree); for (HloInstruction* operand_token : after_all->mutable_operands()) { CHECK(operand_token->shape().IsToken()); TF_RETURN_IF_ERROR(SetInstructionDepth(operand_token, max_depth)); } return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleSend(HloInstruction* send) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send); HloInstruction* send_buffer = send->mutable_operand(0); ShapeTree<int>& send_buffer_depth = GetOrCreateDepthTree(send_buffer); SetDepthFromTupleDepth(send_buffer_depth, depth_tree, 0); int max_depth = GetMaxDepth(depth_tree); HloInstruction* token = send->mutable_operand(1); return SetInstructionDepth(token, max_depth); } absl::Status EinsumDepthAnalysis::HandleRecv(HloInstruction* recv) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv); TF_ASSIGN_OR_RETURN(HloInstruction * send, send_recv_group_map_->GetMatchingSendOrRecv(recv)); CHECK(send) << "recv: " << recv->name() << " not found in send_recv_group_map: " << recv->ToString(); ShapeTree<int>& send_depth = GetOrCreateDepthTree(send); int max_depth = GetMaxDepth(depth_tree); send_depth.ForEachMutableElement([&depth_tree, &send_depth, max_depth]( const ShapeIndex& index, int* depth) { if (!send_depth.IsLeaf(index)) { return; } if (index.front() == 0) { *depth = MergeDepth(*depth, depth_tree.element(index)); return; } *depth = MergeDepth(*depth, max_depth); }); HloInstruction* after_all = recv->mutable_operand(0); return SetInstructionDepth(after_all, max_depth); } absl::Status EinsumDepthAnalysis::HandleSendDone(HloInstruction* send_done) { HloInstruction* send = 
send_done->mutable_operand(0); const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send_done); int max_depth = GetMaxDepth(depth_tree); return SetInstructionDepth(send, max_depth); } absl::Status EinsumDepthAnalysis::HandleRecvDone(HloInstruction* recv_done) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv_done); int max_depth = GetMaxDepth(depth_tree); HloInstruction* recv = recv_done->mutable_operand(0); ShapeTree<int>& recv_depth = GetOrCreateDepthTree(recv); recv_depth.ForEachMutableElement([&depth_tree, &recv_depth, max_depth]( const ShapeIndex& index, int* depth) { if (!recv_depth.IsLeaf(index)) { return; } if (index.front() == 0) { *depth = MergeDepth(*depth, depth_tree.element(index)); return; } *depth = MergeDepth(*depth, max_depth); }); return absl::OkStatus(); } absl::Status EinsumDepthAnalysis::HandleAsyncStart( HloInstruction* async_start) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_start); TF_ASSIGN_OR_RETURN(ShapeTree<int> output_depth_tree, depth_tree.SubShapeTree({1})); return HandleCalledComputation(*(async_start->async_wrapped_computation()), output_depth_tree, async_start->operands()); } absl::Status EinsumDepthAnalysis::HandleAsyncDone(HloInstruction* async_done) { const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_done); HloInstruction* async_start = async_done->mutable_operand(0); ShapeTree<int>& async_start_depth = GetOrCreateDepthTree(async_start); async_start_depth.ForEachMutableElement( [&depth_tree, &async_start_depth](const ShapeIndex& index, int* depth) { if (!async_start_depth.IsLeaf(index)) { return; } if (index.front() == 1) { ShapeIndex output_index = index; output_index.pop_front(); *depth = MergeDepth(*depth, depth_tree.element(output_index)); } }); return absl::OkStatus(); } namespace { int MergeHeight(int original_height, int new_height) { return std::max(original_height, new_height); } void SetHeight(ShapeTree<int>& height_tree, int height) { height_tree.ForEachMutableElement( [height, 
&height_tree](const ShapeIndex& shape_index, int* height_ptr) { if (height_tree.IsLeaf(shape_index)) { *height_ptr = MergeHeight(*height_ptr, height); } }); } void SetHeight(ShapeTree<int>& height_tree, const ShapeTree<int>& source, const ShapeIndex& source_index = {}, const ShapeIndex& target_index = {}) { height_tree.ForEachMutableElement( [&source, &source_index, &target_index](const ShapeIndex& shape_index, int* height_ptr) { if (shape_index.size() < target_index.size()) { return; } for (int i = 0; i < target_index.size(); ++i) { if (shape_index[i] != target_index[i]) { return; } } ShapeIndex complete_source_index = source_index; for (int i = target_index.size(); i < shape_index.size(); ++i) { complete_source_index.push_back(shape_index[i]); } *height_ptr = MergeHeight(*height_ptr, source.element(complete_source_index)); }); } int GetMaxHeight(const ShapeTree<int>& height_tree) { int max_height = 0; height_tree.ForEachElement( [&max_height](const ShapeIndex& shape_index, int height) { max_height = std::max(max_height, height); return absl::OkStatus(); }); return max_height; } int GetMaxOperandHeight(HloInstruction* instruction, const EinsumHeightMap& einsum_height_map) { int max_height = 0; for (HloInstruction* operand : instruction->mutable_operands()) { auto operand_height_iter = einsum_height_map.find(operand); CHECK(operand_height_iter != einsum_height_map.end()) << "operand: " << operand->name(); const ShapeTree<int>& operand_height_tree = operand_height_iter->second; int max_operand_height = GetMaxHeight(operand_height_tree); max_height = std::max(max_height, max_operand_height); } return max_height; } } absl::StatusOr<std::unique_ptr<EinsumHeightAnalysis>> EinsumHeightAnalysis::Run( const HloComputation& computation, const SendRecvGroupMap& send_recv_group_map) { EinsumHeightAnalysis* analysis_ptr = new EinsumHeightAnalysis(send_recv_group_map); std::unique_ptr<EinsumHeightAnalysis> analysis(analysis_ptr); 
TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {})); TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {})); return analysis; } absl::Status EinsumHeightAnalysis::RunInternal( const HloComputation& computation, absl::Span<HloInstruction* const> operands) { return HandleCalledComputation(computation, operands); } ShapeTree<int>& EinsumHeightAnalysis::GetOrCreateHeightTree( const HloInstruction* instruction) { auto height_iter = einsum_height_map_.find(instruction); if (height_iter == einsum_height_map_.end()) { ShapeTree<int> height_tree(instruction->shape(), 0); auto inserted = einsum_height_map_.insert( std::make_pair(instruction, std::move(height_tree))); height_iter = inserted.first; } return height_iter->second; } ShapeTree<int>& EinsumHeightAnalysis::GetHeightTreeOrDie( const HloInstruction* instruction) { auto height_iter = einsum_height_map_.find(instruction); CHECK(height_iter != einsum_height_map_.end()); return height_iter->second; } bool EinsumHeightAnalysis::HasHeightFor( const HloInstruction* instruction) const { return einsum_height_map_.contains(instruction); } absl::Status EinsumHeightAnalysis::SetInstructionHeight( const HloInstruction* instruction, int height) { ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction); SetHeight(height_tree, height); return absl::OkStatus(); } absl::Status EinsumHeightAnalysis::SetInstructionHeight( const HloInstruction* instruction, const ShapeTree<int>& height) { ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction); SetHeight(height_tree, height); return absl::OkStatus(); } #define RETURN_IF_HEIGHT_EXISTS(instruction) \ if (HasHeightFor(instruction)) { \ return absl::OkStatus(); \ } absl::Status EinsumHeightAnalysis::HandleHeightIncrementInstruction( HloInstruction* instruction) { ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction); for (HloInstruction* operand : instruction->mutable_operands()) { const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand); 
SetHeight(height_tree, operand_height_tree.element({}) + 1); } return absl::OkStatus(); } absl::Status EinsumHeightAnalysis::HandleCalledComputation( const HloComputation& computation, absl::Span<HloInstruction* const> operands) { if (!operands.empty()) { if (computation.num_parameters() != operands.size()) { return absl::InvalidArgumentError(absl::StrCat( operands.size(), " operands were passed for the computation ", computation.name(), " with ", computation.num_parameters(), " parameters.")); } for (int parameter_index = 0; parameter_index < computation.num_parameters(); ++parameter_index) { HloInstruction* parameter = computation.parameter_instruction(parameter_index); HloInstruction* operand = operands[parameter_index]; const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand); TF_RETURN_IF_ERROR(SetInstructionHeight(parameter, operand_height_tree)); } } for (HloInstruction* instruction : computation.instructions()) { if (instruction->user_count() == 0) { TF_RETURN_IF_ERROR(instruction->Accept(this)); } } return absl::OkStatus(); } absl::Status EinsumHeightAnalysis::DefaultAction(HloInstruction* instruction) { RETURN_IF_HEIGHT_EXISTS(instruction); int instruction_height = GetMaxOperandHeight(instruction, einsum_height_map_); return SetInstructionHeight(instruction, instruction_height); } absl::Status EinsumHeightAnalysis::HandleTupleLike(HloInstruction* tuple_like) { ShapeTree<int>& height_tree = GetOrCreateHeightTree(tuple_like); height_tree.ForEachMutableElement([&height_tree, tuple_like, this]( const ShapeIndex& index, int* height) { if (!height_tree.IsLeaf(index)) { return; } int operand_index = index.front(); const HloInstruction* operand = tuple_like->operand(operand_index); const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand); ShapeIndex source_index = index; source_index.pop_front(); *height = MergeHeight(*height, operand_height_tree.element(source_index)); }); return absl::OkStatus(); } absl::Status 
EinsumHeightAnalysis::HandleTuple(HloInstruction* tuple) {
  RETURN_IF_HEIGHT_EXISTS(tuple);
  return HandleTupleLike(tuple);
}

absl::Status EinsumHeightAnalysis::HandleGetTupleElement(
    HloInstruction* get_tuple_element) {
  RETURN_IF_HEIGHT_EXISTS(get_tuple_element);
  // Copy the height of the selected tuple element into the result tree.
  ShapeTree<int>& result_tree = GetOrCreateHeightTree(get_tuple_element);
  const ShapeTree<int>& tuple_tree =
      GetHeightTreeOrDie(get_tuple_element->operand(0));
  SetHeight(result_tree, tuple_tree, {get_tuple_element->tuple_index()}, {});
  return absl::OkStatus();
}

absl::Status EinsumHeightAnalysis::HandleDot(HloInstruction* dot) {
  RETURN_IF_HEIGHT_EXISTS(dot);
  return HandleHeightIncrementInstruction(dot);
}

absl::Status EinsumHeightAnalysis::HandleConvolution(
    HloInstruction* convolution) {
  RETURN_IF_HEIGHT_EXISTS(convolution);
  return HandleHeightIncrementInstruction(convolution);
}

// A call takes the height of the callee's root, computed with the call
// operands bound to the callee's parameters.
absl::Status EinsumHeightAnalysis::HandleCall(HloInstruction* call) {
  RETURN_IF_HEIGHT_EXISTS(call);
  HloComputation* callee = call->called_computations()[0];
  TF_RETURN_IF_ERROR(
      HandleCalledComputation(*callee, call->mutable_operands()));
  TF_RETURN_IF_ERROR(SetInstructionHeight(
      call, GetHeightTreeOrDie(callee->root_instruction())));
  return absl::OkStatus();
}

absl::Status EinsumHeightAnalysis::HandleFusion(HloInstruction* fusion) {
  RETURN_IF_HEIGHT_EXISTS(fusion);
  // A fusion behaves like a call to its fused computation.
  return HandleCall(fusion);
}

// Both while computations see the while operands; the result takes the
// height of the body's root.
absl::Status EinsumHeightAnalysis::HandleWhile(HloInstruction* xla_while) {
  RETURN_IF_HEIGHT_EXISTS(xla_while);
  TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_condition()),
                                             xla_while->mutable_operands()));
  TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_body()),
                                             xla_while->mutable_operands()));
  return SetInstructionHeight(
      xla_while,
      GetHeightTreeOrDie(xla_while->while_body()->root_instruction()));
}

// Branch i consumes operand i + 1 (operand 0 is the branch selector); the
// conditional takes the heights of the branch roots, applied in order.
absl::Status EinsumHeightAnalysis::HandleConditional(
    HloInstruction* conditional) {
  RETURN_IF_HEIGHT_EXISTS(conditional);
  ShapeTree<int>& result_tree = GetOrCreateHeightTree(conditional);
  for (size_t branch = 0; branch < conditional->branch_count(); ++branch) {
    HloComputation* branch_computation =
        conditional->branch_computation(branch);
    TF_RETURN_IF_ERROR(HandleCalledComputation(
        *branch_computation, {conditional->mutable_operands()[branch + 1]}));
    SetHeight(result_tree,
              GetHeightTreeOrDie(branch_computation->root_instruction()));
  }
  return absl::OkStatus();
}

// The send result at index {0} takes the height of the send buffer
// (operand 0).
absl::Status EinsumHeightAnalysis::HandleSend(HloInstruction* send) {
  RETURN_IF_HEIGHT_EXISTS(send);
  HloInstruction* send_buffer = send->mutable_operand(0);
  const ShapeTree<int>& buffer_tree = GetHeightTreeOrDie(send_buffer);
  SetHeight(GetOrCreateHeightTree(send), buffer_tree, {}, {0});
  return absl::OkStatus();
}

// A recv takes its height from the matching send's buffer; the send may
// live in a computation not visited yet, so visit it first.
absl::Status EinsumHeightAnalysis::HandleRecv(HloInstruction* recv) {
  RETURN_IF_HEIGHT_EXISTS(recv);
  TF_ASSIGN_OR_RETURN(HloInstruction * send,
                      send_recv_group_map_->GetMatchingSendOrRecv(recv));
  TF_RETURN_IF_ERROR(send->Accept(this));
  HloInstruction* send_buffer = send->mutable_operand(0);
  const ShapeTree<int>& buffer_tree = GetHeightTreeOrDie(send_buffer);
  SetHeight(GetOrCreateHeightTree(recv), buffer_tree, {}, {0});
  return absl::OkStatus();
}

absl::Status EinsumHeightAnalysis::HandleSendDone(HloInstruction* send_done) {
  RETURN_IF_HEIGHT_EXISTS(send_done);
  // A send-done carries no data; give it an all-zero height tree.
  GetOrCreateHeightTree(send_done);
  return absl::OkStatus();
}

absl::Status EinsumHeightAnalysis::HandleRecvDone(HloInstruction* recv_done) {
  RETURN_IF_HEIGHT_EXISTS(recv_done);
  HloInstruction* recv = recv_done->mutable_operand(0);
  const ShapeTree<int>& recv_tree = GetHeightTreeOrDie(recv);
  // Element {0} of the recv carries the received data.
  SetHeight(GetOrCreateHeightTree(recv_done), recv_tree, {0}, {0});
  return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleAllReduce(HloInstruction* all_reduce) { RETURN_IF
#include "xla/service/hlo_value_semantics_analysis.h"

#include <memory>
#include <string>

#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// HLO for a complete MNIST MLP training loop: three dense layers with relu
// (relu.9), a softmax cross-entropy backward pass (relu_gradients.29 plus the
// transposes/dots in body.49), and SGD updates (subtract of 0.01-scaled
// gradients), all driven by infeed inside a while loop.
// NOTE(review): not referenced by the tests visible in this excerpt —
// presumably used by a Mnist test further down the file.
const char kMnistHlo[] = R"(
HloModule MnistTrainingLoopWithInfeed.140, entry_computation_layout={(f32[784,128]{1,0:T(8,128)},f32[128]{0:T(256)},f32[128,32]{1,0:T(8,128)},f32[32]{0:T(256)},f32[32,10]{1,0:T(8,128)},f32[10]{0:T(256)})->(f32[784,128]{1,0:T(8,128)}, f32[128]{0:T(256)}, f32[128,32]{1,0:T(8,128)}, f32[32]{0:T(256)}, f32[32,10]{1,0:T(8,128)}, f32[10]{0:T(256)})}

relu.9 {
  x.10 = f32[] parameter(0)
  constant.11 = f32[] constant(0)
  ROOT maximum.12 = f32[] maximum(x.10, constant.11)
}

max_F32.17 {
  lhs.18 = f32[] parameter(0)
  rhs.19 = f32[] parameter(1)
  ROOT maximum.20 = f32[] maximum(lhs.18, rhs.19)
}

add_F32.1 {
  lhs.22 = f32[] parameter(0)
  rhs.23 = f32[] parameter(1)
  ROOT add.24 = f32[] add(lhs.22, rhs.23)
}

relu_gradients.29 {
  activation.30 = f32[] parameter(0)
  constant.32 = f32[] constant(0)
  compare.33 = pred[] compare(activation.30, constant.32), direction=GT
  backprop.31 = f32[] parameter(1)
  ROOT select.34 = f32[] select(compare.33, backprop.31, constant.32)
}

body.49 {
  after-all.51 = token[] after-all()
  infeed.52 = ((f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]), token[]) infeed(after-all.51)
  get.53 = (f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]) get-tuple-element(infeed.52), index=0
  get.54 = f32[100,784]{1,0} get-tuple-element(get.53), index=0
  prev.50 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  get.57 = f32[784,128]{1,0} get-tuple-element(prev.50), index=0
  dot.63 = f32[100,128]{1,0} dot(get.54, get.57), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.58 = f32[128]{0} get-tuple-element(prev.50), index=1
  broadcast.64 = f32[100,128]{1,0} broadcast(get.58), dimensions={1}
  add.65 = f32[100,128]{1,0} add(dot.63, broadcast.64)
  map.66 = f32[100,128]{1,0} map(add.65), dimensions={0,1}, to_apply=relu.9
  get.59 = f32[128,32]{1,0} get-tuple-element(prev.50), index=2
  dot.67 = f32[100,32]{1,0} dot(map.66, get.59), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.60 = f32[32]{0} get-tuple-element(prev.50), index=3
  broadcast.68 = f32[100,32]{1,0} broadcast(get.60), dimensions={1}
  add.69 = f32[100,32]{1,0} add(dot.67, broadcast.68)
  map.70 = f32[100,32]{1,0} map(add.69), dimensions={0,1}, to_apply=relu.9
  get.61 = f32[32,10]{1,0} get-tuple-element(prev.50), index=4
  dot.71 = f32[100,10]{1,0} dot(map.70, get.61), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.62 = f32[10]{0} get-tuple-element(prev.50), index=5
  broadcast.72 = f32[100,10]{1,0} broadcast(get.62), dimensions={1}
  add.73 = f32[100,10]{1,0} add(dot.71, broadcast.72)
  constant.74 = f32[] constant(-inf)
  reduce.75 = f32[100]{0} reduce(add.73, constant.74), dimensions={1}, to_apply=max_F32.17
  broadcast.76 = f32[100,10]{1,0} broadcast(reduce.75), dimensions={0}
  subtract.77 = f32[100,10]{1,0} subtract(add.73, broadcast.76)
  exponential.78 = f32[100,10]{1,0} exponential(subtract.77)
  constant.79 = f32[] constant(0)
  reduce.80 = f32[100]{0} reduce(exponential.78, constant.79), dimensions={1}, to_apply=add_F32.1
  broadcast.81 = f32[100,10]{1,0} broadcast(reduce.80), dimensions={0}
  divide.82 = f32[100,10]{1,0} divide(exponential.78, broadcast.81)
  get.55 = f32[100,10]{1,0} get-tuple-element(get.53), index=1
  subtract.83 = f32[100,10]{1,0} subtract(divide.82, get.55)
  transpose.88 = f32[10,32]{0,1} transpose(get.61), dimensions={1,0}
  dot.89 = f32[100,32]{1,0} dot(subtract.83, transpose.88), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.90 = f32[100,32]{1,0} map(map.70, dot.89), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.95 = f32[32,128]{0,1} transpose(get.59), dimensions={1,0}
  dot.96 = f32[100,128]{1,0} dot(map.90, transpose.95), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.97 = f32[100,128]{1,0} map(map.66, dot.96), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.98 = f32[784,100]{0,1} transpose(get.54), dimensions={1,0}
  dot.99 = f32[784,128]{1,0} dot(transpose.98, map.97), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  constant.104 = f32[] constant(0.01)
  broadcast.105 = f32[784,128]{1,0} broadcast(constant.104), dimensions={}
  multiply.106 = f32[784,128]{1,0} multiply(dot.99, broadcast.105)
  subtract.107 = f32[784,128]{1,0} subtract(get.57, multiply.106)
  reduce.101 = f32[128]{0} reduce(map.97, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.109 = f32[128]{0} broadcast(constant.104), dimensions={}
  multiply.110 = f32[128]{0} multiply(reduce.101, broadcast.109)
  subtract.111 = f32[128]{0} subtract(get.58, multiply.110)
  transpose.91 = f32[128,100]{0,1} transpose(map.66), dimensions={1,0}
  dot.92 = f32[128,32]{1,0} dot(transpose.91, map.90), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.113 = f32[128,32]{1,0} broadcast(constant.104), dimensions={}
  multiply.114 = f32[128,32]{1,0} multiply(dot.92, broadcast.113)
  subtract.115 = f32[128,32]{1,0} subtract(get.59, multiply.114)
  reduce.94 = f32[32]{0} reduce(map.90, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.117 = f32[32]{0} broadcast(constant.104), dimensions={}
  multiply.118 = f32[32]{0} multiply(reduce.94, broadcast.117)
  subtract.119 = f32[32]{0} subtract(get.60, multiply.118)
  transpose.84 = f32[32,100]{0,1} transpose(map.70), dimensions={1,0}
  dot.85 = f32[32,10]{1,0} dot(transpose.84, subtract.83), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.121 = f32[32,10]{1,0} broadcast(constant.104), dimensions={}
  multiply.122 = f32[32,10]{1,0} multiply(dot.85, broadcast.121)
  subtract.123 = f32[32,10]{1,0} subtract(get.61, multiply.122)
  reduce.87 = f32[10]{0} reduce(subtract.83, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.125 = f32[10]{0} broadcast(constant.104), dimensions={}
  multiply.126 = f32[10]{0} multiply(reduce.87, broadcast.125)
  subtract.127 = f32[10]{0} subtract(get.62, multiply.126)
  get.56 = pred[] get-tuple-element(get.53), index=2
  ROOT tuple.128 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(subtract.107, subtract.111, subtract.115, subtract.119, subtract.123, subtract.127, get.56)
}

condition.129 {
  prev.130 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  ROOT get.131 = pred[] get-tuple-element(prev.130), index=6
}

ENTRY MnistTrainingLoopWithInfeed.140 {
  layer1_weights.1 = f32[784,128]{1,0} parameter(0)
  layer1_biases.2 = f32[128]{0} parameter(1)
  layer2_weights.3 = f32[128,32]{1,0} parameter(2)
  layer2_biases.4 = f32[32]{0} parameter(3)
  layer3_weights.5 = f32[32,10]{1,0} parameter(4)
  layer3_biases.6 = f32[10]{0} parameter(5)
  constant.7 = pred[] constant(true)
  tuple.8 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(layer1_weights.1, layer1_biases.2, layer2_weights.3, layer2_biases.4, layer3_weights.5, layer3_biases.6, constant.7)
  while.132 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) while(tuple.8), condition=condition.129, body=body.49
  get.133 = f32[784,128]{1,0} get-tuple-element(while.132), index=0
  get.134 = f32[128]{0} get-tuple-element(while.132), index=1
  get.135 = f32[128,32]{1,0} get-tuple-element(while.132), index=2
  get.136 = f32[32]{0} get-tuple-element(while.132), index=3
  get.137 = f32[32,10]{1,0} get-tuple-element(while.132), index=4
  get.138 = f32[10]{0} get-tuple-element(while.132), index=5
  ROOT tuple.139 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}) tuple(get.133, get.134, get.135, get.136, get.137, get.138)
}
)";

// Fixture with helpers that check the semantic label that
// HloValueSemanticsAnalysis assigned to a named instruction.
class HloValueSemanticsAnalysisTest : public HloTestBase {
 public:
  bool
HasLabel(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name, const HloValueSemanticLabel& expected_label) { HloInstruction* instruction = FindInstruction(module, instruction_name); const HloValueSemantics* semantics = hlo_value_semantics_analysis.GetSemantics(instruction); LOG(INFO) << "instruction: " << instruction->ToString() << semantics->ToString(); return semantics->label() == expected_label; } bool IsStatic(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kStatic); } bool IsWeight(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kWeight); } bool IsActivation( const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kActivation); } bool IsActivationGradient( const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kActivationGradient); } bool IsWeightGradient( const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kWeightGradient); } bool IsTupleOrToken( const HloValueSemanticsAnalysis& hlo_value_semantics_analysis, HloModule* module, absl::string_view instruction_name) { return HasLabel(hlo_value_semantics_analysis, module, instruction_name, HloValueSemanticLabel::kTupleOrToken); } }; 
// A single forward matmul plus its relu/SGD backward pass: the weight side
// stays kWeight, the forward product is kActivation, and the relu mask is
// kStatic.
TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) {
  const std::string hlo_text = R"(
HloModule OneMatmul

region_0.39 {
  Arg_0.40 = f32[] parameter(0)
  Arg_1.41 = f32[] parameter(1)
  ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1}
  maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1}
  compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1}
  select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
  dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.11 = f32[] constant(-0.01), sharding={replicated}
  broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1}
  add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1}
  reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated}
  ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_text, 1, 2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
}

// A conditional whose branches return tuples (one branch via an async op)
// is labeled kTupleOrToken.
TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) {
  const std::string hlo_text = R"(
HloModule Module

branch0 {
  tparam = f32[4] parameter(0)
  tgte1 = f32[4] ceil(tparam)
  ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1)
}

branch1 {
  fparam = f32[4] parameter(0)
  %async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
  %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
  ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done)
}

ENTRY entry {
  p0 = f32[4] parameter(0)
  b0 = s32[] parameter(1)
  ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0), branch_computations={branch0, branch1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_text, 1, 2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(),
                             "conditional"));
}

// Two stacked matmuls with full backward pass: checks which products end up
// kActivation vs kWeight and that the relu mask stays kStatic.
TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) {
  const std::string hlo_text = R"(
HloModule TwoMatmuls

region_0.44 {
  Arg_0.45 = f32[] parameter(0)
  Arg_1.46 = f32[] parameter(1)
  ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1}
  dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1}
  maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1}
  compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1}
  select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1}
  dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
  dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.12 = f32[] constant(-0.01), sharding={replicated}
  broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1}
  add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1}
  dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1}
  multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1}
  add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1}
  reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated}
  ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_text, 1, 2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6"));
}

TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) { const std::string module_str = R"( HloModule RepeatWhile region_0.52 { arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated} constant.61 = s32[] constant(1), sharding={replicated} add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated} get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1} get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1} constant.69 = s32[] constant(0), sharding={replicated}
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated} constant.68 = s32[] constant(3), sharding={replicated} add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated} select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated} dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1} reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1} dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1} dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1} reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1} dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} constant.43 = f32[] constant(0), sharding={replicated} broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1} maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1} get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1} reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1} dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1} compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), 
direction=EQ, sharding={devices=[2,1]0,1} constant.42 = f32[] constant(1), sharding={replicated} broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1} select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1} reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1} dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1} reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1} dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} } region_1.107 { arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated} constant.116 = s32[] constant(3) ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT } region_2.126 { Arg_0.127 = f32[] parameter(0) Arg_1.128 = f32[] parameter(1) ROOT 
add.129 = f32[] add(Arg_0.127, Arg_1.128) } wide.wide.region_3.156.clone.clone { wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated} constant.34 = s32[] constant(1), sharding={replicated} add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated} get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1} get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1} constant.35 = s32[] constant(3), sharding={replicated} subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated} constant.6..sunk.4 = s32[] constant(-1), sharding={replicated} add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated} constant.36 = s32[] constant(0), sharding={replicated} compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated} constant.26..sunk.1 = s32[] constant(2), sharding={replicated} add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated} select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated} dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1} reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1} multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1} get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), 
index=7, sharding={devices=[1,1,2]0,1} dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1} reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1} dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1} get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1} dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1} reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1} dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1} get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1} get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1} dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1} reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1} dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1} dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1} get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1} get-tuple-element.189 = f32[3,4,128]{2,1,0} 
get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1} dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1} reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1} dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1} reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1} dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1} ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} } wide.wide.region_4.218.clone.clone { wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated} constant.28 = s32[] constant(3) ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT } ENTRY entry { Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), 
sharding={devices=[1,2,1]0,1} constant.45 = s32[] constant(0), sharding={replicated} constant.23 = f32[] constant(1), sharding={replicated} broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1} constant.21 = f32[] constant(0), sharding={replicated} broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1} Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1} copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1} broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1} tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1} get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1} get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1} tuple.18 = 
(s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} while.3 = (s32[], f3
// Records `height` as the height tree for `instruction`, creating the tree on
// first use and overwriting any previously stored heights. Always succeeds.
absl::Status EinsumHeightAnalysis::SetInstructionHeight(
    const HloInstruction* instruction, const ShapeTree<int>& height) {
  // GetOrCreateHeightTree returns a mutable reference into the analysis map,
  // so the copy performed by SetHeight lands directly in the stored tree.
  SetHeight(GetOrCreateHeightTree(instruction), height);
  return absl::OkStatus();
}
TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) { const std::string module_str = R"( HloModule OneMatmul region_0.39 { Arg_0.40 = f32[] parameter(0) Arg_1.41 = f32[] parameter(1) ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41) } ENTRY entry { Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1} Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1} copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1} dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} constant.5 = f32[] constant(0), sharding={replicated} broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1} maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1} compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1} constant.4 = f32[] constant(1), sharding={replicated} broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1} select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1} dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} constant.11 = f32[] constant(-0.01), sharding={replicated} broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1} multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1} add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1} reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated} ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}} } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(module_str, 1, 2)); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloValueSemanticsAnalysis> 
hlo_value_semantics_analysis, HloValueSemanticsAnalysis::Run(*module)); EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy")); EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2")); EXPECT_TRUE( IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0")); EXPECT_TRUE( IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35")); EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2")); } TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) { const std::string module_str = R"( HloModule Module branch0 { tparam = f32[4] parameter(0) tgte1 = f32[4] ceil(tparam) ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1) } branch1 { fparam = f32[4] parameter(0) %async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread" %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start) ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done) } ENTRY entry { p0 = f32[4] parameter(0) b0 = s32[] parameter(1) ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0), branch_computations={branch0, branch1} } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(module_str, 1, 2)); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis, HloValueSemanticsAnalysis::Run(*module)); EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(), "conditional")); } TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) { const std::string module_str = R"( HloModule TwoMatmuls region_0.44 { Arg_0.45 = f32[] parameter(0) Arg_1.46 = f32[] parameter(1) ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46) } ENTRY entry { Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1} Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1} copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1} dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, 
rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1} dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1} constant.5 = f32[] constant(0), sharding={replicated} broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1} maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1} compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1} constant.4 = f32[] constant(1), sharding={replicated} broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1} select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1} dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1} dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} constant.12 = f32[] constant(-0.01), sharding={replicated} broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1} multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1} add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1} dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1} broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1} multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1} add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1} reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated} ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, 
reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}} } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(module_str, 1, 2)); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis, HloValueSemanticsAnalysis::Run(*module)); EXPECT_FALSE( IsActivation(*hlo_value_semantics_analysis, module.get(), "copy")); EXPECT_FALSE( IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2")); EXPECT_TRUE( IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0")); EXPECT_FALSE( IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3")); EXPECT_TRUE( IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1")); EXPECT_TRUE( IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40")); EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2")); EXPECT_TRUE( IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5")); EXPECT_TRUE( IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6")); } TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) { const std::string module_str = R"( HloModule RepeatWhile region_0.52 { arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated} constant.61 = s32[] constant(1), sharding={replicated} add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated} get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1} get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1} constant.69 = s32[] constant(0), sharding={replicated} 
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated} constant.68 = s32[] constant(3), sharding={replicated} add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated} select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated} dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1} reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1} dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1} dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1} reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1} dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} constant.43 = f32[] constant(0), sharding={replicated} broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1} maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1} get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1} reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1} dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1} compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), 
direction=EQ, sharding={devices=[2,1]0,1} constant.42 = f32[] constant(1), sharding={replicated} broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1} select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1} reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1} dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1} reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1} dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1} ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} } region_1.107 { arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated} constant.116 = s32[] constant(3) ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT } region_2.126 { Arg_0.127 = f32[] parameter(0) Arg_1.128 = f32[] parameter(1) ROOT 
add.129 = f32[] add(Arg_0.127, Arg_1.128) } wide.wide.region_3.156.clone.clone { wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated} constant.34 = s32[] constant(1), sharding={replicated} add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated} get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1} get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1} constant.35 = s32[] constant(3), sharding={replicated} subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated} constant.6..sunk.4 = s32[] constant(-1), sharding={replicated} add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated} constant.36 = s32[] constant(0), sharding={replicated} compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated} constant.26..sunk.1 = s32[] constant(2), sharding={replicated} add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated} select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated} dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1} reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1} multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1} get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), 
index=7, sharding={devices=[1,1,2]0,1} dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1} reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1} dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1} get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1} dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1} reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1} dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1} get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1} get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1} dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1} reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1} dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1} reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1} dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1} get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1} get-tuple-element.189 = f32[3,4,128]{2,1,0} 
get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1} dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1} reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1} dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1} reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1} dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1} ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} } wide.wide.region_4.218.clone.clone { wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated} constant.28 = s32[] constant(3) ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT } ENTRY entry { Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), 
sharding={devices=[1,2,1]0,1} constant.45 = s32[] constant(0), sharding={replicated} constant.23 = f32[] constant(1), sharding={replicated} broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1} constant.21 = f32[] constant(0), sharding={replicated} broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1} Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1} copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1} broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1} Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1} tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}} get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1} get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1} get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1} tuple.18 = 
(s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}} while.3 = (s32[], f3
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h" #include <algorithm> #include <cmath> #include <cstdint> #include <string> #include <vector> #include "absl/algorithm/container.h" namespace tensorflow { namespace { using tensorflow::errors::InvalidArgument; Status ValidDim(int64_t dims, int64_t dim) { if (dim < 0 || dim >= dims) { return InvalidArgument( "Each dimension number must be in region [0, rank). Given rank ", dims, " and dimension number value ", dim); } return absl::OkStatus(); } Status ValidSpatialDimensions( int64_t dims, const protobuf::RepeatedField<int64_t>& spatial_dimensions) { if (spatial_dimensions.size() != dims - 2) { return InvalidArgument( "Spatial dimensions size must be rank - 2. Given rank ", dims, " and spatial dimensions size ", spatial_dimensions.size()); } for (int i = 0; i < spatial_dimensions.size(); ++i) { TF_RETURN_IF_ERROR(ValidDim(dims, spatial_dimensions.Get(i))); } return absl::OkStatus(); } } Status UniformQuantizedConvolutionParams::LoadFromAttrs( const OpKernelConstruction& context) { return LoadFromAttrsInternal(context); } Status UniformQuantizedConvolutionParams::LoadFromAttrs( const shape_inference::InferenceContext& context) { return LoadFromAttrsInternal(context); } Status UniformQuantizedConvolutionParams::ValidateOrFillParamsAndValidateShape( const TensorShape& lhs_shape, const TensorShape& rhs_shape) { if (lhs_shape.dims() != rhs_shape.dims()) { return InvalidArgument( "lhs and rhs must have same dims. Given lhs and rhs of shapes: ", lhs_shape.DebugString(), rhs_shape.DebugString()); } const int64_t dims = lhs_shape.dims(); if (dims <= 2) { return InvalidArgument("lhs and rhs shape dims must be at least 3. 
Given: ", dims); } const int64_t num_spatial_dims = dims - 2; if (window_strides_.empty()) { window_strides_.resize(num_spatial_dims, 1); } else if (window_strides_.size() != num_spatial_dims) { return InvalidArgument("Size of window_strides Attr must be dims - 2."); } else if (!absl::c_all_of(window_strides_, [](int stride) { return stride >= 1; })) { return InvalidArgument( "All elements of window_strides must be >= 1. Given ", absl::StrJoin(window_strides_, ", ")); } if (lhs_dilation_.empty()) { lhs_dilation_.resize(num_spatial_dims, 1); } else if (lhs_dilation_.size() != num_spatial_dims) { return InvalidArgument("Size of lhs_dilation Attr must be dims - 2."); } else if (!absl::c_all_of(lhs_dilation_, [](const int dilation) { return dilation >= 1; })) { return InvalidArgument("All elements of lhs_dilation must be >= 1. Given ", absl::StrJoin(lhs_dilation_, ", ")); } if (rhs_dilation_.empty()) { rhs_dilation_.resize(num_spatial_dims, 1); } else if (rhs_dilation_.size() != num_spatial_dims) { return InvalidArgument("Size of rhs_dilation Attr must be dims - 2."); } else if (!absl::c_all_of(rhs_dilation_, [](const int dilation) { return dilation >= 1; })) { return InvalidArgument("All elements of rhs_dilation must be >= 1. 
Given ", absl::StrJoin(rhs_dilation_, ", ")); } if (dimension_numbers_.input_spatial_dimensions_size() == 0) { dimension_numbers_.set_input_batch_dimension(0); dimension_numbers_.set_input_feature_dimension(1); for (int64_t i = 0; i < num_spatial_dims; ++i) { dimension_numbers_.add_input_spatial_dimensions(2 + i); } dimension_numbers_.set_kernel_output_feature_dimension(0); dimension_numbers_.set_kernel_input_feature_dimension(1); for (int64_t i = 0; i < num_spatial_dims; ++i) { dimension_numbers_.add_kernel_spatial_dimensions(2 + i); } dimension_numbers_.set_output_batch_dimension(0); dimension_numbers_.set_output_feature_dimension(1); for (int64_t i = 0; i < num_spatial_dims; ++i) { dimension_numbers_.add_output_spatial_dimensions(2 + i); } } else { TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.input_batch_dimension())); TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.input_feature_dimension())); TF_RETURN_IF_ERROR(ValidSpatialDimensions( dims, dimension_numbers_.input_spatial_dimensions())); TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.kernel_input_feature_dimension())); TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.kernel_output_feature_dimension())); TF_RETURN_IF_ERROR(ValidSpatialDimensions( dims, dimension_numbers_.kernel_spatial_dimensions())); TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.output_batch_dimension())); TF_RETURN_IF_ERROR( ValidDim(dims, dimension_numbers_.output_batch_dimension())); TF_RETURN_IF_ERROR(ValidSpatialDimensions( dims, dimension_numbers_.output_spatial_dimensions())); } if (feature_group_count_ <= 0) { return InvalidArgument( "feature_group_count must be a positive integer, given: ", feature_group_count_); } const int64_t lhs_feature_count = lhs_shape.dim_size(dimension_numbers_.input_feature_dimension()); if (lhs_feature_count % feature_group_count_) { return InvalidArgument( "feature_group_count must divide lhs feature dimension size, but ", feature_group_count_, " does not divide ", 
lhs_feature_count); } const int64_t rhs_input_feature_count = rhs_shape.dim_size(dimension_numbers_.kernel_input_feature_dimension()); if (lhs_feature_count % rhs_input_feature_count) { return InvalidArgument( "rhs input feature dimension must divide lhs feature dimension " "size, but ", rhs_input_feature_count, " does not divide ", lhs_feature_count); } if (lhs_feature_count / feature_group_count_ != rhs_input_feature_count) { return InvalidArgument( "lhs feature dimension size divided by feature_group_count must equal " "the rhs input feature dimension size, but ", lhs_feature_count, " / ", feature_group_count_, " != ", rhs_input_feature_count); } const int64_t rhs_output_feature_count = rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension()); if (rhs_output_feature_count % feature_group_count_) { return InvalidArgument( "rhs output dimension size must be a multiple of feature_group_count, " "but ", rhs_output_feature_count, " is not a multiple of ", feature_group_count_); } if (batch_group_count_ <= 0) { return InvalidArgument( "batch_group_count Attr must be a positive integer. 
Given: ", batch_group_count_); } const int64_t lhs_batch_count = lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()); if (lhs_batch_count % batch_group_count_) { return InvalidArgument( "batch_group_count must divide lhs batch dimension size, but ", batch_group_count_, " does not divide ", lhs_batch_count); } if (rhs_output_feature_count % batch_group_count_) { return InvalidArgument( "rhs output dimension size must be a multiple of batch_group_count, " "but ", rhs_output_feature_count, " is not a multiple of ", batch_group_count_); } return ValidateOrFillPaddingList(lhs_shape, rhs_shape); } absl::StatusOr<TensorShape> UniformQuantizedConvolutionParams::CalculateOutputShape( const TensorShape& lhs_shape, const TensorShape& rhs_shape) const { std::vector<int64_t> output_shape_buf(lhs_shape.dims()); output_shape_buf[dimension_numbers_.output_batch_dimension()] = lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) / batch_group_count_; output_shape_buf[dimension_numbers_.output_feature_dimension()] = rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension()); for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) { const int64_t lhs_size_dilated = DilatedSize( lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)), lhs_dilation_[i]); const int64_t rhs_size_dilated = DilatedSize( rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)), rhs_dilation_[i]); const int64_t output_size_numerator = lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] - rhs_size_dilated + 1; const int64_t output_size_denominator = window_strides_[i]; output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] = (output_size_numerator + output_size_denominator - 1) / output_size_denominator; } TensorShape output_shape; TF_RETURN_IF_ERROR( TensorShape::BuildTensorShape(output_shape_buf, &output_shape)); return output_shape; } template <typename ContextT> Status 
UniformQuantizedConvolutionParams::LoadFromAttrsInternal( const ContextT& context) { TF_RETURN_IF_ERROR(context.GetAttr("window_strides", &window_strides_)); TF_RETURN_IF_ERROR(context.GetAttr("lhs_dilation", &lhs_dilation_)); TF_RETURN_IF_ERROR(context.GetAttr("rhs_dilation", &rhs_dilation_)); TF_RETURN_IF_ERROR(context.GetAttr("batch_group_count", &batch_group_count_)); TF_RETURN_IF_ERROR( context.GetAttr("feature_group_count", &feature_group_count_)); TF_RETURN_IF_ERROR(context.GetAttr("padding", &padding_)); TF_RETURN_IF_ERROR(context.GetAttr("explicit_padding", &padding_list_)); if (padding_ != "EXPLICIT" && padding_ != "SAME" && padding_ != "VALID") { return InvalidArgument( "padding Attr must be one of [EXPLICIT | SAME | VALID], but given: ", padding_); } else if (padding_ != "EXPLICIT" && !padding_list_.empty()) { return InvalidArgument( "If padding Attr is not 'EXPLICIT', explicit_padding Attr must be " "empty. Given padding ", padding_, " and explicit_padding of size ", padding_list_.size()); } std::string dimension_numbers_str; TF_RETURN_IF_ERROR( context.GetAttr("dimension_numbers", &dimension_numbers_str)); if (dimension_numbers_str.empty()) { dimension_numbers_.Clear(); } else if (!dimension_numbers_.ParseFromString(dimension_numbers_str)) { return InvalidArgument("Error parsing convolution dimension numbers."); } return absl::OkStatus(); } Status UniformQuantizedConvolutionParams::ValidateOrFillPaddingList( const TensorShape& lhs_shape, const TensorShape& rhs_shape) { const int64_t dims = lhs_shape.dims(); const int64_t padding_list_size = 2 * (dims - 2); if (padding_ == "EXPLICIT") { if (padding_list_.size() != padding_list_size) { return InvalidArgument( "Size of explicit_padding Attr must be 2 * (rank - 2). 
Given rank ", dims, " and explicit_padding of size ", padding_list_.size()); } else if (!absl::c_all_of(padding_list_, [](int elem) { return elem >= 0; })) { return InvalidArgument("All explicit_padding elems must be >= 0, Given ", absl::StrJoin(padding_list_, ", ")); } } else if (padding_ == "VALID") { padding_list_.resize(padding_list_size, 0); } else { padding_list_.resize(padding_list_size); for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) { const int64_t stride = window_strides_[i]; const int64_t lhs_size_dilated = DilatedSize( lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)), lhs_dilation_[i]); const int64_t rhs_size_dilated = DilatedSize( rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)), rhs_dilation_[i]); const int64_t output_size = (lhs_size_dilated + stride - 1) / stride; const int64_t total_padding = std::max( (output_size - 1) * stride + rhs_size_dilated - lhs_size_dilated, static_cast<int64_t>(0)); const int64_t padding_begin = total_padding / 2; const int64_t padding_end = total_padding - padding_begin; padding_list_[2 * i] = padding_begin; padding_list_[2 * i + 1] = padding_end; } } return absl::OkStatus(); } }
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
#include "tsl/lib/core/status_test_util.h"

namespace tensorflow {
namespace {

using protobuf::TextFormat;
using ::testing::ElementsAreArray;

// DilatedSize(0, d) is 0; 10 elems at dilation 3 -> 28 (two gap slots
// inserted between each pair of elements).
TEST(UniformQuantizedConvolutionParamsTest, DilatedSize) {
  EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(0, 2), 0);
  EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(10, 3), 28);
}

// With empty attrs, validation fills unit strides/dilations, zero padding,
// and batch=0 / feature=1 / spatial={2,3} dimension numbers for both
// input and kernel.
TEST(UniformQuantizedConvolutionParamsTest,
     ValidateOrFillParamsAndValidateShapeDefaultAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({}, {}, {}, dimension_numbers,
                                           1,
                                           1,
                                           "VALID");
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape({2, 2, 3, 4}, {3, 2, 2, 3}));
  EXPECT_THAT(params.window_strides(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.lhs_dilation(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.rhs_dilation(), ElementsAreArray({1, 1}));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({0, 0, 0, 0}));
  EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
              ElementsAreArray({2, 3}));
  EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
              ElementsAreArray({2, 3}));
  EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 1);
  EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
              ElementsAreArray({2, 3}));
}

// Explicitly-set attrs and EXPLICIT padding must be preserved verbatim by
// validation.
TEST(UniformQuantizedConvolutionParamsTest,
     ValidateOrFillParamsAndValidateShapeSetAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
                input_batch_dimension: 0
                input_feature_dimension: 3
                input_spatial_dimensions: 1
                input_spatial_dimensions: 2
                kernel_output_feature_dimension: 3
                kernel_input_feature_dimension: 2
                kernel_spatial_dimensions: 0
                kernel_spatial_dimensions: 1
                output_batch_dimension: 0
                output_feature_dimension: 3
                output_spatial_dimensions: 1
                output_spatial_dimensions: 2
              )pb",
              &dimension_numbers));
  UniformQuantizedConvolutionParams params({2, 2},
                                           {3, 3},
                                           {4, 4},
                                           dimension_numbers,
                                           2,
                                           1,
                                           "EXPLICIT",
                                           {1, 1, 2, 2});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape({2, 3, 4, 2}, {2, 3, 1, 2}));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 1, 2, 2}));
  EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 3);
  EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
              ElementsAreArray({1, 2}));
  EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 3);
  EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 2);
  EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
              ElementsAreArray({0, 1}));
  EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
  EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 3);
  EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
              ElementsAreArray({1, 2}));
}

// Default attrs + VALID padding: output shape [2, 3, 2, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({}, {}, {}, dimension_numbers,
                                           1,
                                           1,
                                           "VALID");
  const TensorShape lhs_shape({2, 2, 3, 4});
  const TensorShape rhs_shape({3, 2, 2, 3});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}

// NHWC-style dimension numbers with strides/dilations/explicit padding:
// output shape [2, 3, 3, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
                input_batch_dimension: 0
                input_feature_dimension: 3
                input_spatial_dimensions: 1
                input_spatial_dimensions: 2
                kernel_output_feature_dimension: 3
                kernel_input_feature_dimension: 2
                kernel_spatial_dimensions: 0
                kernel_spatial_dimensions: 1
                output_batch_dimension: 0
                output_feature_dimension: 3
                output_spatial_dimensions: 1
                output_spatial_dimensions: 2
              )pb",
              &dimension_numbers));
  UniformQuantizedConvolutionParams params({2, 2},
                                           {3, 3},
                                           {4, 4},
                                           dimension_numbers,
                                           2,
                                           1,
                                           "EXPLICIT",
                                           {1, 1, 2, 2});
  const TensorShape lhs_shape({2, 3, 4, 2});
  const TensorShape rhs_shape({2, 3, 1, 2});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}

// SAME mode must fill the padding list itself; odd totals put the extra
// unit on the trailing side ({1, 2} for the first spatial dim here).
TEST(UniformQuantizedConvolutionParamsTest, CalculateSameOptionPadding) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({}, {}, {}, dimension_numbers,
                                           1,
                                           1,
                                           "SAME");
  const TensorShape lhs_shape({2, 2, 3, 4});
  const TensorShape rhs_shape({3, 2, 4, 3});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 2, 1, 1}));
}

}
}
// Computes the convolution output shape implied by the (already validated)
// params. The batch dimension is the lhs batch divided by batch_group_count,
// the feature dimension is the kernel's output-feature extent, and each
// spatial dimension follows from dilated sizes, padding, and stride.
absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
    const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
  const auto& dnums = dimension_numbers_;
  std::vector<int64_t> out_dims(lhs_shape.dims());

  out_dims[dnums.output_batch_dimension()] =
      lhs_shape.dim_size(dnums.input_batch_dimension()) / batch_group_count_;
  out_dims[dnums.output_feature_dimension()] =
      rhs_shape.dim_size(dnums.kernel_output_feature_dimension());

  for (int idx = 0; idx < dnums.input_spatial_dimensions_size(); ++idx) {
    // Input and kernel extents after dilation.
    const int64_t in_dilated =
        DilatedSize(lhs_shape.dim_size(dnums.input_spatial_dimensions(idx)),
                    lhs_dilation_[idx]);
    const int64_t kernel_dilated =
        DilatedSize(rhs_shape.dim_size(dnums.kernel_spatial_dimensions(idx)),
                    rhs_dilation_[idx]);
    const int64_t stride = window_strides_[idx];
    // Padded input minus dilated kernel plus one, then ceiling-divided by
    // the stride.
    const int64_t numerator = in_dilated + padding_list_[2 * idx] +
                              padding_list_[2 * idx + 1] - kernel_dilated + 1;
    out_dims[dnums.output_spatial_dimensions(idx)] =
        (numerator + stride - 1) / stride;
  }

  TensorShape output_shape;
  TF_RETURN_IF_ERROR(TensorShape::BuildTensorShape(out_dims, &output_shape));
  return output_shape;
}
// Default attrs + VALID padding on NCHW-shaped inputs: expects output
// shape [2, 3, 2, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  UniformQuantizedConvolutionParams params({}, {}, {}, dimension_numbers,
                                           1,
                                           1,
                                           "VALID");
  const TensorShape lhs_shape({2, 2, 3, 4});
  const TensorShape rhs_shape({3, 2, 2, 3});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}

// NHWC-style dimension numbers with strides {2,2}, dilations {3,3}/{4,4},
// and EXPLICIT padding {1,1,2,2}: expects output shape [2, 3, 3, 2].
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
  UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
  ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
                input_batch_dimension: 0
                input_feature_dimension: 3
                input_spatial_dimensions: 1
                input_spatial_dimensions: 2
                kernel_output_feature_dimension: 3
                kernel_input_feature_dimension: 2
                kernel_spatial_dimensions: 0
                kernel_spatial_dimensions: 1
                output_batch_dimension: 0
                output_feature_dimension: 3
                output_spatial_dimensions: 1
                output_spatial_dimensions: 2
              )pb",
              &dimension_numbers));
  UniformQuantizedConvolutionParams params({2, 2},
                                           {3, 3},
                                           {4, 4},
                                           dimension_numbers,
                                           2,
                                           1,
                                           "EXPLICIT",
                                           {1, 1, 2, 2});
  const TensorShape lhs_shape({2, 3, 4, 2});
  const TensorShape rhs_shape({2, 3, 1, 2});
  TF_ASSERT_OK(
      params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
  auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
  TF_ASSERT_OK(shape_or.status());
  EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}
#include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"

#include <string>
#include <string_view>

#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"

namespace tensorflow {
namespace {

// Converts the (buffer, status) pair produced by the C coordination-service
// getters into StatusOr<string>. On success the buffer bytes are copied out
// and the buffer is freed.
// NOTE(review): on the error path `result_buf` is not deleted — presumably
// the C API returns no buffer when it reports an error; confirm, otherwise
// this leaks the buffer.
absl::StatusOr<std::string> ProcessGetKeyValueResult(TF_Buffer* result_buf,
                                                     TF_Status* status) {
  if (TF_GetCode(status) != TF_OK) {
    return StatusFromTF_Status(status);
  } else {
    std::string result{static_cast<const char*>(result_buf->data),
                       result_buf->length};
    TF_DeleteBuffer(result_buf);
    return result;
  }
}

}  // namespace

// Stores (key, value) in the coordination service via the plugin C layer.
Status CPluginCoordinationServiceAgent::InsertKeyValue(std::string_view key,
                                                       std::string_view value) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceInsertKeyValue(key.data(), key.size(), value.data(),
                                       value.size(), agent_, status);
  return StatusFromTF_Status(status);
}

// Blocking get of `key` with the C layer's default timeout behavior.
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf =
      TF_CoordinationServiceGetKeyValue(key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}

// Get with an explicit timeout. Note the timeout is forwarded with
// whole-second granularity (absl::ToInt64Seconds truncates sub-second parts).
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key, absl::Duration timeout) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceGetKeyValueWithTimeout(
      key.data(), key.size(), absl::ToInt64Seconds(timeout), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}

// Non-blocking get: returns whatever the service currently holds for `key`.
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::TryGetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceTryGetKeyValue(
      key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}

// Deletes `key` from the coordination service via the plugin C layer.
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
  return StatusFromTF_Status(status);
}

}  // namespace tensorflow
#include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"

#include <memory>
#include <ostream>
#include <string>
#include <utility>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/coordination_config.pb.h"
#include "tsl/protobuf/coordination_service.pb.h"

namespace tensorflow {
namespace {

using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
using tsl::CallOptions;
using tsl::DeleteKeyValueRequest;
using tsl::DeleteKeyValueResponse;
using tsl::GetKeyValueRequest;
using tsl::GetKeyValueResponse;
using tsl::InsertKeyValueRequest;
using tsl::InsertKeyValueResponse;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::Pointee;
using ::testing::SetArgPointee;
using ::testing::WithArgs;

// Matches a proto by comparing DebugString() output against an expected
// message captured at construction time.
class ProtoStringMatcher {
 public:
  explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
      : expected_(expected.DebugString()) {}

  template <typename Message>
  bool MatchAndExplain(const Message& p,
                       ::testing::MatchResultListener*) const {
    return p.DebugString() == expected_;
  }

  void DescribeTo(std::ostream* os) const { *os << expected_; }
  void DescribeNegationTo(std::ostream* os) const {
    *os << "not equal to expected message: " << expected_;
  }

 private:
  const std::string expected_;
};

// gMock-friendly wrapper around ProtoStringMatcher.
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
    const tsl::protobuf::Message& x) {
  return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}

// Tuple matcher comparing two KeyValueEntry protos by key and value only.
MATCHER(KvEq, "simple KeyValueEntry matcher") {
  const KeyValueEntry& kv0 = std::get<0>(arg);
  const KeyValueEntry& kv1 = std::get<1>(arg);
  return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}

// Coordination client whose KV RPCs are gMock methods; every other RPC
// fails with Unimplemented.
class TestCoordinationClient : public CoordinationClient {
 public:
  TestCoordinationClient() = default;
  MOCK_METHOD(void, GetKeyValueAsync,
              (CallOptions * call_opts, const GetKeyValueRequest*,
               GetKeyValueResponse*, StatusCallback),
              (override));
  MOCK_METHOD(void, TryGetKeyValueAsync,
              (const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, InsertKeyValueAsync,
              (const InsertKeyValueRequest*, InsertKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, DeleteKeyValueAsync,
              (const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
               StatusCallback),
              (override));
  void GetKeyValueDirAsync(const tsl::GetKeyValueDirRequest* request,
                           tsl::GetKeyValueDirResponse* response,
                           StatusCallback done) override {
    done(absl::UnimplementedError("GetKeyValueDirAsync"));
  }
  void ResetTaskAsync(const tsl::ResetTaskRequest* request,
                      tsl::ResetTaskResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("ResetTaskAsync"));
  }
  void ReportErrorToServiceAsync(
      const tsl::ReportErrorToServiceRequest* request,
      tsl::ReportErrorToServiceResponse* response,
      StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToServiceAsync"));
  }
  void BarrierAsync(const tsl::BarrierRequest* request,
                    tsl::BarrierResponse* response,
                    StatusCallback done) override {
    done(absl::UnimplementedError("BarrierAsync"));
  }
  void GetTaskStateAsync(const tsl::GetTaskStateRequest* request,
                         tsl::GetTaskStateResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("GetTaskStateAsync"));
  }
  void WaitForAllTasksAsync(const tsl::WaitForAllTasksRequest* request,
                            tsl::WaitForAllTasksResponse* response,
                            StatusCallback done) override {
    done(absl::UnimplementedError("WaitForAllTasksAsync"));
  }
  void CancelBarrierAsync(const tsl::CancelBarrierRequest* request,
                          tsl::CancelBarrierResponse* response,
                          StatusCallback done) override {
    done(absl::UnimplementedError("CancelBarrierAsync"));
  }
  void RegisterTaskAsync(tsl::CallOptions*,
                         const tsl::RegisterTaskRequest* request,
                         tsl::RegisterTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("RegisterTaskAsync"));
  }
  void ShutdownTaskAsync(tsl::CallOptions*,
                         const tsl::ShutdownTaskRequest* request,
                         tsl::ShutdownTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("ShutdownTaskAsync"));
  }
  void HeartbeatAsync(tsl::CallOptions*, const tsl::HeartbeatRequest* request,
                      tsl::HeartbeatResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("HeartbeatAsync"));
  }
  void ReportErrorToTaskAsync(CallOptions* call_opts,
                              const ReportErrorToTaskRequest* request,
                              ReportErrorToTaskResponse* response,
                              StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToTaskAsync"));
  }
};

// Fixture wiring a real CoordinationServiceAgent to the mock client, with
// the C-plugin wrapper under test on top.
class CPluginCoordinationServiceAgentTest : public ::testing::Test {
 public:
  // Initializes the underlying agent; this consumes client_, so set all
  // mock expectations (via GetClient()) before calling it.
  void InitializeAgent(CoordinationServiceConfig config = {}) {
    config.set_service_leader("test_leader");
    TF_ASSERT_OK(impl_->Initialize(
        tsl::Env::Default(), "test_job", 0, config, std::move(client_),
        [](Status s) {
          LOG(ERROR) << "Coordination agent is set to error: " << s;
        }));
  }

  TestCoordinationClient* GetClient() {
    // InitializeAgent() moves client_ away, so expectations must be set first.
    CHECK(client_ != nullptr)
        << "GetClient() was called after InitializeAgent()";
    return client_.get();
  }

 protected:
  std::unique_ptr<CoordinationServiceAgent> impl_ =
      tsl::CreateCoordinationServiceAgent();
  std::unique_ptr<CPluginCoordinationServiceAgent> agent_ =
      std::make_unique<CPluginCoordinationServiceAgent>(impl_.get());
  std::unique_ptr<TestCoordinationClient> client_ =
      std::make_unique<TestCoordinationClient>();
};

TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));

  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key);

  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}

TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));

  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));

  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}

// Holds the RPC callback so the request never completes, forcing the
// timeout path; the callback is invoked afterwards to unblock cleanup.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
  const std::string test_key = "test_key";
  StatusCallback owned_done;
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(
          WithArgs<3>([&](StatusCallback done) { owned_done = done; }));

  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));

  EXPECT_EQ(result.status().code(), error::DEADLINE_EXCEEDED);
  owned_done(absl::CancelledError("error"));
}

TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_ZeroTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::ZeroDuration());

  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}

TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_NegativeTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(-1));

  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}

TEST_F(CPluginCoordinationServiceAgentTest, InsertKeyValue_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  InsertKeyValueRequest expected_input;
  auto kv = expected_input.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  EXPECT_CALL(*GetClient(),
              InsertKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));

  InitializeAgent();
  TF_ASSERT_OK(agent_->InsertKeyValue(test_key, test_value));
}

// DeleteKeyValue is expected to issue a directory-style delete
// (is_directory = true) for the given key.
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
  const std::string test_key = "test_x_key";
  DeleteKeyValueRequest expected_input;
  expected_input.set_key(test_key);
  expected_input.set_is_directory(true);
  EXPECT_CALL(*GetClient(),
              DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));

  InitializeAgent();
  TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}

TEST_F(CPluginCoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
  // Lifetime-extended temporaries; the references stay valid for the test.
  const std::string& test_key = "test_key";
  const std::string& test_value = "test_value";
  TryGetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
      .WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
                           InvokeArgument<2>(absl::OkStatus())));

  InitializeAgent();
  auto result = agent_->TryGetKeyValue(test_key);

  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}

}  // namespace
}  // namespace tensorflow
// Removes `key` from the coordination service through the plugin C layer
// and converts the resulting TF_Status into a tensorflow::Status.
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
  // RAII wrapper frees the TF_Status when this scope exits.
  TF_StatusPtr owned_status(TF_NewStatus());
  TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_,
                                       owned_status.get());
  return StatusFromTF_Status(owned_status.get());
}
// DeleteKeyValue must surface as a directory-style delete request
// (is_directory = true) carrying the key, and propagate the OK callback
// status to the caller.
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
  const std::string test_key = "test_x_key";
  DeleteKeyValueRequest expected_input;
  expected_input.set_key(test_key);
  expected_input.set_is_directory(true);
  EXPECT_CALL(*GetClient(),
              DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));

  InitializeAgent();
  TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}
"#include \"tensorflow/core/kernels/data/shard_dataset_op.h\"\n#include <cstdlib>\n#include <functio(...TRUNCATED)
"#include \"tensorflow/core/kernels/data/shard_dataset_op.h\"\n#include \"tensorflow/core/data/datas(...TRUNCATED)
"const std::vector<PartialTensorShape>& output_shapes() const override {\n return input_->output_(...TRUNCATED)
"TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {\n auto dataset_params = ShardDatasetParams1();\n(...TRUNCATED)
"#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_WRAPPER_TYPE_H_\n#define THIRD_PARTY_CEL_CPP_COMMON(...TRUNCATED)
"#include <sstream>\n#include \"absl/hash/hash.h\"\n#include \"absl/types/optional.h\"\n#include \"c(...TRUNCATED)
"inline std::ostream& operator<<(std::ostream& out,\n const DoubleWra(...TRUNCATED)
"TEST(DoubleWrapperType, DebugString) {\n {\n std::ostringstream out;\n out << DoubleWrapperT(...TRUNCATED)
"#include \"quiche/common/quiche_random.h\"\n#include <cstdint>\n#include <cstring>\n#include \"open(...TRUNCATED)
"#include \"quiche/common/quiche_random.h\"\n#include \"quiche/common/platform/api/quiche_test.h\"\n(...TRUNCATED)
"uint64_t RandUint64() override;\n void InsecureRandBytes(void* data, size_t len) override;\n uint(...TRUNCATED)
"TEST(QuicheRandom, RandUint64) {\n auto rng = QuicheRandom::GetInstance();\n uint64_t value1 = rn(...TRUNCATED)
"#include \"tsl/lib/io/buffered_inputstream.h\"\n#include \"absl/status/status.h\"\n#include \"tsl/l(...TRUNCATED)
"#include \"tsl/lib/io/buffered_inputstream.h\"\n#include \"tsl/lib/core/status_test_util.h\"\n#incl(...TRUNCATED)
// Reports the logical read position: the underlying stream has already
// advanced past the whole buffer, so subtract the bytes that are buffered
// but not yet consumed.
int64_t BufferedInputStream::Tell() const {
  const int64_t unread_buffered = limit_ - pos_;
  return input_stream_->Tell() - unread_buffered;
}
"TEST(BufferedInputStream, ReadNBytes) {\n Env* env = Env::Default();\n string fname;\n ASSERT_TR(...TRUNCATED)
README.md exists but content is empty. Use the Edit dataset card button to edit it.
Downloads last month
1
Edit dataset card