Column                 Type    Min length  Max length
Original Code File     string  196         31.9k
Original Ground Truth  string  78          32k
Code                   string  15          27.3k
Unit Test              string  0           30.4k
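Each row pairs a C++ source file (Original Code File) with its unit-test file (Original Ground Truth), plus an extracted function (Code) and the test snippet exercising it (Unit Test). A minimal sketch of loading and inspecting such a dump with the Hugging Face datasets library; the dataset path "your-org/cpp-code-unit-test-pairs" is a hypothetical placeholder, and only the column names come from the schema above:

from datasets import load_dataset

# Load the dump; the path below is a hypothetical placeholder.
ds = load_dataset("your-org/cpp-code-unit-test-pairs", split="train")

row = ds[0]
# Column names follow the schema above; every value is a plain string.
print(row["Original Code File"][:200])     # full C++ source file
print(row["Original Ground Truth"][:200])  # full unit-test file
print(row["Code"])                         # extracted function under test
print(row["Unit Test"])                    # test snippet exercising it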
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h" #include "absl/strings/str_cat.h" #include "absl/strings/substitute.h" #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stringpiece.h" namespace tensorflow { namespace generator { namespace cpp { Renderer::Renderer(RendererContext context) : context_(context) {} Renderer& Renderer::BlankLine() { context_.code.AddLineWithoutIndent(""); return *this; } Renderer& Renderer::CodeLine(const string& text) { context_.code.AddLineWithoutIndent(text); return *this; } Renderer& Renderer::CodeLines(const string& text) { StringPiece trimmed_text(text); str_util::RemoveWhitespaceContext(&trimmed_text); for (const string& line : str_util::Split(trimmed_text, '\n')) { context_.code.AddLineWithoutIndent(line); } return *this; } Renderer& Renderer::Statement(const string& text) { if (str_util::EndsWith(text, ";")) { LOG(WARNING) << "Superfluous terminating ';' in '" << text << "'"; context_.code.AddLineWithIndent(text); } else { context_.code.AddLineWithIndent(absl::StrCat(text, ";")); } return *this; } Renderer& Renderer::TFStatement(const string& text) { return Statement(absl::Substitute("TF_RETURN_IF_ERROR($0)", text)); } Renderer& Renderer::CommentLine(const string& text) { context_.code.AddLineWithIndent(absl::StrCat(" return *this; } Renderer& Renderer::BlockOpen(const string& text) { context_.code.AddLineWithIndent(absl::StrCat(text, " {")); context_.code.IncreaseIndent(); return *this; } Renderer& Renderer::BlockClose(const string& text) { context_.code.DecreaseIndent(); context_.code.AddLineWithIndent(absl::StrCat("}", text)); return *this; } } } }
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h" #include "tensorflow/c/experimental/ops/gen/common/path_config.h" #include "tensorflow/c/experimental/ops/gen/common/source_code.h" #include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h" #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace generator { namespace cpp { namespace { TEST(Renderer, typical_usage) { class TestRenderer : Renderer { public: explicit TestRenderer(SourceCode& code) : Renderer( {RendererContext::kSource, code, CppConfig(), PathConfig()}) {} void Render() { CommentLine("File level comment."); CodeLine("#include \"header.h\""); BlankLine(); BlockOpen("void TestFunction()"); { Statement("int i = 1"); BlankLine(); BlockOpen("while (i == 1)"); { CommentLine("Do nothing, really...."); CodeLine("#if 0"); Statement("call()"); CodeLine("#endif"); BlockClose(); } BlockClose(" } } }; SourceCode code; TestRenderer(code).Render(); string expected = R"( #include "header.h" void TestFunction() { int i = 1; while (i == 1) { #if 0 call(); #endif } } )"; code.SetSpacesPerIndent(3); EXPECT_EQ(expected, code.Render()); } } } } }
Renderer& Renderer::BlankLine() { context_.code.AddLineWithoutIndent(""); return *this; }
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h" #include "tensorflow/c/experimental/ops/gen/common/path_config.h" #include "tensorflow/c/experimental/ops/gen/common/source_code.h" #include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h" #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace generator { namespace cpp { namespace { TEST(Renderer, typical_usage) { class TestRenderer : Renderer { public: explicit TestRenderer(SourceCode& code) : Renderer( {RendererContext::kSource, code, CppConfig(), PathConfig()}) {} void Render() { CommentLine("File level comment."); CodeLine("#include \"header.h\""); BlankLine(); BlockOpen("void TestFunction()"); { Statement("int i = 1"); BlankLine(); BlockOpen("while (i == 1)"); { CommentLine("Do nothing, really...."); CodeLine("#if 0"); Statement("call()"); CodeLine("#endif"); BlockClose(); } BlockClose(" } } }; SourceCode code; TestRenderer(code).Render(); string expected = R"( #include "header.h" void TestFunction() { int i = 1; while (i == 1) { #if 0 call(); #endif } } )"; code.SetSpacesPerIndent(3); EXPECT_EQ(expected, code.Render()); }
#include "tensorflow/core/data/service/common.h" #include <string> #include "absl/strings/string_view.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/framework/dataset_options.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/protobuf/data_service.pb.h" namespace tensorflow { namespace data { namespace { constexpr const char kAuto[] = "AUTO"; constexpr const char kAny[] = "ANY"; constexpr const char kLocal[] = "LOCAL"; constexpr const char kColocated[] = "COLOCATED"; constexpr const char kRemote[] = "REMOTE"; constexpr const char kHybrid[] = "HYBRID"; } bool IsNoShard(const ProcessingModeDef& processing_mode) { return processing_mode.sharding_policy() == ProcessingModeDef::OFF; } bool IsDynamicShard(const ProcessingModeDef& processing_mode) { return processing_mode.sharding_policy() == ProcessingModeDef::DYNAMIC; } bool IsStaticShard(const ProcessingModeDef& processing_mode) { return processing_mode.sharding_policy() == ProcessingModeDef::FILE || processing_mode.sharding_policy() == ProcessingModeDef::DATA || processing_mode.sharding_policy() == ProcessingModeDef::FILE_OR_DATA || processing_mode.sharding_policy() == ProcessingModeDef::HINT; } Status ValidateProcessingMode(const ProcessingModeDef& processing_mode) { if (!IsNoShard(processing_mode) && !IsDynamicShard(processing_mode) && !IsStaticShard(processing_mode)) { return errors::Internal( "ProcessingMode ", processing_mode.ShortDebugString(), " does not " "specify a valid sharding policy. Please add the policy to either " "`IsDynamicShard` or `IsStaticShard` (i.e., auto-shard)."); } return absl::OkStatus(); } absl::StatusOr<AutoShardPolicy> ToAutoShardPolicy( const ProcessingModeDef::ShardingPolicy sharding_policy) { switch (sharding_policy) { case ProcessingModeDef::FILE: return AutoShardPolicy::FILE; case ProcessingModeDef::DATA: return AutoShardPolicy::DATA; case ProcessingModeDef::FILE_OR_DATA: return AutoShardPolicy::AUTO; case ProcessingModeDef::HINT: return AutoShardPolicy::HINT; case ProcessingModeDef::DYNAMIC: case ProcessingModeDef::OFF: return AutoShardPolicy::OFF; default: return errors::Internal( "tf.data service sharding policy ", ProcessingModeDef::ShardingPolicy_Name(sharding_policy), " is not convertible to a valid auto-shard policy. If you're " "defining a new sharding policy, please update the policy mapping."); } } absl::StatusOr<TargetWorkers> ParseTargetWorkers(absl::string_view s) { std::string str_upper = absl::AsciiStrToUpper(s); if (str_upper.empty() || str_upper == kAuto) { return TARGET_WORKERS_AUTO; } if (str_upper == kAny) { return TARGET_WORKERS_ANY; } if (str_upper == kLocal) { return TARGET_WORKERS_LOCAL; } return errors::InvalidArgument("Unrecognized target workers: ", s); } std::string TargetWorkersToString(TargetWorkers target_workers) { switch (target_workers) { case TARGET_WORKERS_AUTO: return kAuto; case TARGET_WORKERS_ANY: return kAny; case TARGET_WORKERS_LOCAL: return kLocal; default: DCHECK(false); return "UNKNOWN"; } } absl::StatusOr<DeploymentMode> ParseDeploymentMode(absl::string_view s) { std::string str_upper = absl::AsciiStrToUpper(s); if (str_upper == kColocated) { return DEPLOYMENT_MODE_COLOCATED; } if (str_upper == kRemote) { return DEPLOYMENT_MODE_REMOTE; } if (str_upper == kHybrid) { return DEPLOYMENT_MODE_HYBRID; } return errors::InvalidArgument("Invalid tf.data service deployment mode: ", s, ". 
Supported modes are " "COLOCATED, REMOTE, and HYBRID."); } bool IsPreemptedError(const Status& status) { return errors::IsAborted(status) || errors::IsCancelled(status) || errors::IsUnavailable(status); } } }
#include "tensorflow/core/data/service/common.h" #include <vector> #include "tensorflow/core/framework/dataset_options.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { using ::tensorflow::testing::IsOkAndHolds; using ::tensorflow::testing::StatusIs; using ::testing::HasSubstr; std::vector<ProcessingModeDef::ShardingPolicy> EnumerateShardingPolicies() { std::vector<ProcessingModeDef::ShardingPolicy> result; const ::tensorflow::protobuf::EnumDescriptor* enum_descriptor = ::tensorflow::protobuf::GetEnumDescriptor< ProcessingModeDef::ShardingPolicy>(); for (int i = 0; i < enum_descriptor->value_count(); ++i) { result.push_back(static_cast<ProcessingModeDef::ShardingPolicy>( enum_descriptor->value(i)->number())); } return result; } TEST(CommonTest, NoShard) { ProcessingModeDef processing_mode; processing_mode.set_sharding_policy(ProcessingModeDef::OFF); EXPECT_TRUE(IsNoShard(processing_mode)); EXPECT_FALSE(IsDynamicShard(processing_mode)); EXPECT_FALSE(IsStaticShard(processing_mode)); } TEST(CommonTest, DynamicShard) { ProcessingModeDef processing_mode; processing_mode.set_sharding_policy(ProcessingModeDef::DYNAMIC); EXPECT_FALSE(IsNoShard(processing_mode)); EXPECT_TRUE(IsDynamicShard(processing_mode)); EXPECT_FALSE(IsStaticShard(processing_mode)); } TEST(CommonTest, StaticShard) { ProcessingModeDef processing_mode; std::vector<ProcessingModeDef::ShardingPolicy> policies = { ProcessingModeDef::FILE, ProcessingModeDef::DATA, ProcessingModeDef::FILE_OR_DATA, ProcessingModeDef::HINT}; for (const ProcessingModeDef::ShardingPolicy policy : policies) { processing_mode.set_sharding_policy(policy); EXPECT_FALSE(IsNoShard(processing_mode)); EXPECT_FALSE(IsDynamicShard(processing_mode)); EXPECT_TRUE(IsStaticShard(processing_mode)); } } TEST(CommonTest, DefaultShardingPolicyIsNoShard) { ProcessingModeDef processing_mode; EXPECT_TRUE(IsNoShard(processing_mode)); EXPECT_FALSE(IsDynamicShard(processing_mode)); EXPECT_FALSE(IsStaticShard(processing_mode)); } TEST(CommonTest, ToAutoShardPolicy) { EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::FILE_OR_DATA), IsOkAndHolds(AutoShardPolicy::AUTO)); EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::HINT), IsOkAndHolds(AutoShardPolicy::HINT)); EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::OFF), IsOkAndHolds(AutoShardPolicy::OFF)); EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::DYNAMIC), IsOkAndHolds(AutoShardPolicy::OFF)); } TEST(CommonTest, ConvertValidShardingPolicyToAutoShardPolicy) { for (const ProcessingModeDef::ShardingPolicy sharding_policy : EnumerateShardingPolicies()) { TF_EXPECT_OK(ToAutoShardPolicy(sharding_policy).status()); } } TEST(CommonTest, ConvertInvalidShardingPolicyToAutoShardPolicy) { const ProcessingModeDef::ShardingPolicy sharding_policy = static_cast<ProcessingModeDef::ShardingPolicy>(-100); EXPECT_THAT(ToAutoShardPolicy(sharding_policy), StatusIs(error::INTERNAL, HasSubstr("please update the policy mapping."))); } TEST(CommonTest, ValidateProcessingMode) { for (const ProcessingModeDef::ShardingPolicy policy : EnumerateShardingPolicies()) { ProcessingModeDef processing_mode; 
processing_mode.set_sharding_policy(policy); TF_EXPECT_OK(ValidateProcessingMode(processing_mode)); } } TEST(CommonTest, InvalidProcessingMode) { ProcessingModeDef processing_mode; processing_mode.set_sharding_policy( static_cast<ProcessingModeDef::ShardingPolicy>(100)); EXPECT_THAT(ValidateProcessingMode(processing_mode), StatusIs(error::INTERNAL, HasSubstr("does not specify a valid sharding policy."))); } TEST(CommonTest, ParseTargetWorkers) { EXPECT_THAT(ParseTargetWorkers("AUTO"), IsOkAndHolds(TARGET_WORKERS_AUTO)); EXPECT_THAT(ParseTargetWorkers("Auto"), IsOkAndHolds(TARGET_WORKERS_AUTO)); EXPECT_THAT(ParseTargetWorkers("ANY"), IsOkAndHolds(TARGET_WORKERS_ANY)); EXPECT_THAT(ParseTargetWorkers("any"), IsOkAndHolds(TARGET_WORKERS_ANY)); EXPECT_THAT(ParseTargetWorkers("LOCAL"), IsOkAndHolds(TARGET_WORKERS_LOCAL)); EXPECT_THAT(ParseTargetWorkers("local"), IsOkAndHolds(TARGET_WORKERS_LOCAL)); EXPECT_THAT(ParseTargetWorkers(""), IsOkAndHolds(TARGET_WORKERS_AUTO)); } TEST(CommonTest, ParseInvalidTargetWorkers) { EXPECT_THAT(ParseTargetWorkers("TARGET_WORKERS_UNSPECIFIED"), testing::StatusIs(error::INVALID_ARGUMENT)); EXPECT_THAT(ParseTargetWorkers("UNSET"), testing::StatusIs(error::INVALID_ARGUMENT)); } TEST(CommonTest, TargetWorkersToString) { EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_AUTO), "AUTO"); EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_ANY), "ANY"); EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_LOCAL), "LOCAL"); } TEST(CommonTest, ParseDeploymentMode) { EXPECT_THAT(ParseDeploymentMode("COLOCATED"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_COLOCATED)); EXPECT_THAT(ParseDeploymentMode("Colocated"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_COLOCATED)); EXPECT_THAT(ParseDeploymentMode("REMOTE"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_REMOTE)); EXPECT_THAT(ParseDeploymentMode("remote"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_REMOTE)); EXPECT_THAT(ParseDeploymentMode("HYBRID"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_HYBRID)); EXPECT_THAT(ParseDeploymentMode("hybrid"), IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_HYBRID)); } TEST(CommonTest, ParseInvalidDeploymentMode) { EXPECT_THAT(ParseDeploymentMode("DEPLOYMENT_MODE_UNSPECIFIED"), testing::StatusIs(error::INVALID_ARGUMENT)); } TEST(CommonTest, IsPreemptedError) { EXPECT_TRUE(IsPreemptedError(errors::Aborted("Aborted"))); EXPECT_TRUE(IsPreemptedError(errors::Cancelled("Cancelled"))); EXPECT_TRUE(IsPreemptedError(errors::Unavailable("Unavailable"))); EXPECT_FALSE(IsPreemptedError(absl::OkStatus())); } TEST(CommonTest, IsPermanentError) { EXPECT_FALSE( IsPreemptedError(errors::FailedPrecondition("Failed precondition"))); EXPECT_FALSE(IsPreemptedError(errors::Internal("Internal"))); EXPECT_FALSE(IsPreemptedError(errors::InvalidArgument("Invalid argument"))); EXPECT_FALSE(IsPreemptedError(errors::NotFound("Not found"))); EXPECT_FALSE(IsPreemptedError(errors::OutOfRange("Out of range"))); EXPECT_FALSE(IsPreemptedError(errors::Unknown("Unknown"))); } } } }
Status ValidateProcessingMode(const ProcessingModeDef& processing_mode) { if (!IsNoShard(processing_mode) && !IsDynamicShard(processing_mode) && !IsStaticShard(processing_mode)) { return errors::Internal( "ProcessingMode ", processing_mode.ShortDebugString(), " does not " "specify a valid sharding policy. Please add the policy to either " "`IsDynamicShard` or `IsStaticShard` (i.e., auto-shard)."); } return absl::OkStatus(); }
TEST(CommonTest, ValidateProcessingMode) { for (const ProcessingModeDef::ShardingPolicy policy : EnumerateShardingPolicies()) { ProcessingModeDef processing_mode; processing_mode.set_sharding_policy(policy); TF_EXPECT_OK(ValidateProcessingMode(processing_mode)); } } TEST(CommonTest, InvalidProcessingMode) { ProcessingModeDef processing_mode; processing_mode.set_sharding_policy( static_cast<ProcessingModeDef::ShardingPolicy>(100)); EXPECT_THAT(ValidateProcessingMode(processing_mode), StatusIs(error::INTERNAL, HasSubstr("does not specify a valid sharding policy."))); }
#include "tensorflow/core/tfrt/utils/fallback_tensor.h" #include <utility> #include "tensorflow/core/common_runtime/dma_helper.h" namespace tensorflow { namespace tfrt_stub { namespace { class ImmutableTensorBuffer final : public tensorflow::TensorBuffer { public: static tensorflow::core::RefCountPtr<ImmutableTensorBuffer> Create( tensorflow::Tensor tensor); explicit ImmutableTensorBuffer(tensorflow::Tensor tensor) : tensorflow::TensorBuffer(tensor.data()), tensor_(std::move(tensor)) { if (auto* buf = tensorflow::DMAHelper::buffer(&tensor_)) { root_buffer_ = buf->root_buffer(); } else { root_buffer_ = this; } } ~ImmutableTensorBuffer() override = default; size_t size() const override { return tensorflow::DMAHelper::buffer(&tensor_)->size(); } bool OwnsMemory() const override { return false; } tensorflow::TensorBuffer* root_buffer() override { return root_buffer_; } void FillAllocationDescription(AllocationDescription* proto) const override {} bool GetAllocatedBytes(size_t*) const override { return false; } private: tensorflow::Tensor tensor_; tensorflow::TensorBuffer* root_buffer_ = nullptr; }; tensorflow::core::RefCountPtr<ImmutableTensorBuffer> ImmutableTensorBuffer::Create(tensorflow::Tensor tensor) { return tensorflow::core::RefCountPtr<ImmutableTensorBuffer>( new ImmutableTensorBuffer(std::move(tensor))); } } ImmutableTensor ImmutableTensor::Create(tensorflow::Tensor tensor) { auto dtype = tensor.dtype(); auto shape = tensor.shape(); auto immutable_buffer = ImmutableTensorBuffer::Create(std::move(tensor)); return ImmutableTensor( tensorflow::Tensor(dtype, shape, std::move(immutable_buffer))); } } }
#include "tensorflow/core/tfrt/utils/fallback_tensor.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/tensor_shape.h" namespace tensorflow { namespace tfrt_stub { namespace { TEST(FallbackTensorTest, ImmutableTensor) { int32_t scalar = 123; tensorflow::Tensor tensor(scalar); auto immutable_tensor = ImmutableTensor::Create(tensor); ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1); ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_INT32); auto flat = immutable_tensor.tensor().flat<int32_t>(); EXPECT_EQ(flat(0), 123); EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne()); EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes()); } TEST(FallbackTensorTest, StringImmutableTensor) { tensorflow::tstring scalar = "string"; tensorflow::Tensor tensor(scalar); auto immutable_tensor = ImmutableTensor::Create(tensor); ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1); ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_STRING); auto flat = immutable_tensor.tensor().flat<tensorflow::tstring>(); EXPECT_EQ(flat(0), "string"); EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne()); EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes()); } TEST(FallbackTensorTest, FallbackTensor) { int32_t scalar = 123; tensorflow::Tensor tensor(scalar); { FallbackTensor fallback_tensor(tensor); EXPECT_FALSE(fallback_tensor.is_immutable()); ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1); ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32); auto flat = fallback_tensor.tensor().flat<int32_t>(); EXPECT_EQ(flat(0), 123); FallbackTensor copy(fallback_tensor); FallbackTensor assign; assign = fallback_tensor; ASSERT_EQ(copy.tensor().NumElements(), 1); ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123); ASSERT_EQ(assign.tensor().NumElements(), 1); ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123); fallback_tensor = {}; ASSERT_EQ(copy.tensor().NumElements(), 1); ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123); ASSERT_EQ(assign.tensor().NumElements(), 1); ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123); } auto immutable_tensor = ImmutableTensor::Create(tensor); { FallbackTensor fallback_tensor(&immutable_tensor); EXPECT_TRUE(fallback_tensor.is_immutable()); ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1); ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32); auto flat = fallback_tensor.tensor().flat<int32_t>(); EXPECT_EQ(flat(0), 123); FallbackTensor copy(fallback_tensor); FallbackTensor assign; assign = fallback_tensor; ASSERT_EQ(copy.tensor().NumElements(), 1); ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123); ASSERT_EQ(assign.tensor().NumElements(), 1); ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123); fallback_tensor = {}; ASSERT_EQ(copy.tensor().NumElements(), 1); ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123); ASSERT_EQ(assign.tensor().NumElements(), 1); ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32); EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123); } } TEST(FallbackTensorTest, FallbackTensorCopy) { int32_t scalar = 123; 
tensorflow::Tensor tensor(scalar); { FallbackTensor fallback_tensor(tensor); EXPECT_FALSE(fallback_tensor.is_immutable()); auto copy = fallback_tensor; EXPECT_TRUE(copy.is_immutable()); } auto immutable_tensor = ImmutableTensor::Create(tensor); { FallbackTensor fallback_tensor(&immutable_tensor); EXPECT_TRUE(fallback_tensor.is_immutable()); auto copy = fallback_tensor; EXPECT_TRUE(copy.is_immutable()); } } TEST(FallbackTensorTest, FallbackTensorCopyRootBuffer) { int32_t scalar = 123; tensorflow::Tensor tensor(scalar); auto immutable_tensor = ImmutableTensor::Create(tensor); FallbackTensor fallback_tensor(&immutable_tensor); EXPECT_TRUE(fallback_tensor.is_immutable()); EXPECT_EQ(fallback_tensor.buffer()->root_buffer(), tensorflow::DMAHelper::buffer(&tensor)); FallbackTensor copy = fallback_tensor; EXPECT_TRUE(copy.is_immutable()); EXPECT_EQ(copy.buffer()->root_buffer(), tensorflow::DMAHelper::buffer(&tensor)); } TEST(FallbackTensorTest, EmptyTensor) { tensorflow::Tensor tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({1, 0})); FallbackTensor fallback_tensor(tensor); auto copy = fallback_tensor; ASSERT_FALSE(copy.buffer()); } } } }
bool OwnsMemory() const override { return false; }
#include "tensorflow/core/tfrt/utils/fallback_tensor.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/tensor_shape.h" namespace tensorflow { namespace tfrt_stub { namespace { TEST(FallbackTensorTest, ImmutableTensor) { int32_t scalar = 123; tensorflow::Tensor tensor(scalar); auto immutable_tensor = ImmutableTensor::Create(tensor); ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1); ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_INT32); auto flat = immutable_tensor.tensor().flat<int32_t>(); EXPECT_EQ(flat(0), 123); EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne()); EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes()); } TEST(FallbackTensorTest, StringImmutableTensor) { tensorflow::tstring scalar = "string"; tensorflow::Tensor tensor(scalar); auto immutable_tensor = ImmutableTensor::Create(tensor); ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1); ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_STRING); auto flat = immutable_tensor.tensor().flat<tensorflow::tstring>(); EXPECT_EQ(flat(0), "string"); EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne()); EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes()); }
#include "xla/backends/profiler/gpu/cupti_error_manager.h" #include <utility> #include "absl/debugging/leak_check.h" #include "tsl/platform/logging.h" namespace xla { namespace profiler { using tsl::mutex_lock; CuptiErrorManager::CuptiErrorManager(std::unique_ptr<CuptiInterface> interface) : interface_(std::move(interface)), disabled_(0), undo_disabled_(false) {} #define IGNORE_CALL_IF_DISABLED \ if (disabled_) { \ LOG(ERROR) << "cupti" << __func__ << ": ignored due to a previous error."; \ return CUPTI_ERROR_DISABLED; \ } \ VLOG(1) << "cupti" << __func__; #define ALLOW_ERROR(e, ERROR) \ if (e == ERROR) { \ VLOG(1) << "cupti" << __func__ << ": error " << static_cast<int>(e) \ << ": " << ResultString(e) << " (allowed)"; \ return e; \ } #define LOG_AND_DISABLE_IF_ERROR(e) \ if (e != CUPTI_SUCCESS) { \ LOG(ERROR) << "cupti" << __func__ << ": error " << static_cast<int>(e) \ << ": " << ResultString(e); \ UndoAndDisable(); \ } void CuptiErrorManager::RegisterUndoFunction( const CuptiErrorManager::UndoFunction& func) { mutex_lock lock(undo_stack_mu_); undo_stack_.push_back(func); } CUptiResult CuptiErrorManager::ActivityDisable(CUpti_ActivityKind kind) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityDisable(kind); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityEnable(CUpti_ActivityKind kind) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityEnable(kind); if (error == CUPTI_SUCCESS) { auto f = std::bind(&CuptiErrorManager::ActivityDisable, this, kind); RegisterUndoFunction(f); } LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityFlushAll(uint32_t flag) { CUptiResult error = interface_->ActivityFlushAll(flag); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityGetNextRecord( uint8_t* buffer, size_t valid_buffer_size_bytes, CUpti_Activity** record) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityGetNextRecord( buffer, valid_buffer_size_bytes, record); ALLOW_ERROR(error, CUPTI_ERROR_MAX_LIMIT_REACHED); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityGetNumDroppedRecords(CUcontext context, uint32_t stream_id, size_t* dropped) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityGetNumDroppedRecords(context, stream_id, dropped); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityConfigureUnifiedMemoryCounter( CUpti_ActivityUnifiedMemoryCounterConfig* config, uint32_t count) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityConfigureUnifiedMemoryCounter(config, count); return error; } CUptiResult CuptiErrorManager::ActivityRegisterCallbacks( CUpti_BuffersCallbackRequestFunc func_buffer_requested, CUpti_BuffersCallbackCompleteFunc func_buffer_completed) { IGNORE_CALL_IF_DISABLED; absl::LeakCheckDisabler disabler; CUptiResult error = interface_->ActivityRegisterCallbacks( func_buffer_requested, func_buffer_completed); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::ActivityUsePerThreadBuffer() { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->ActivityUsePerThreadBuffer(); return error; } CUptiResult CuptiErrorManager::GetDeviceId(CUcontext context, uint32_t* device_id) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->GetDeviceId(context, device_id); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::GetTimestamp(uint64_t* timestamp) { 
IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->GetTimestamp(timestamp); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::Finalize() { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->Finalize(); ALLOW_ERROR(error, CUPTI_ERROR_API_NOT_IMPLEMENTED); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::EnableCallback(uint32_t enable, CUpti_SubscriberHandle subscriber, CUpti_CallbackDomain domain, CUpti_CallbackId callback_id) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->EnableCallback(enable, subscriber, domain, callback_id); if (error == CUPTI_SUCCESS) { if (enable == 1) { auto f = std::bind(&CuptiErrorManager::EnableCallback, this, 0 , subscriber, domain, callback_id); RegisterUndoFunction(f); } } LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::EnableDomain(uint32_t enable, CUpti_SubscriberHandle subscriber, CUpti_CallbackDomain domain) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->EnableDomain(enable, subscriber, domain); if (error == CUPTI_SUCCESS) { if (enable == 1) { auto f = std::bind(&CuptiErrorManager::EnableDomain, this, 0 , subscriber, domain); RegisterUndoFunction(f); } } LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::Subscribe(CUpti_SubscriberHandle* subscriber, CUpti_CallbackFunc callback, void* userdata) { IGNORE_CALL_IF_DISABLED; absl::LeakCheckDisabler disabler; CUptiResult error = interface_->Subscribe(subscriber, callback, userdata); if (error == CUPTI_SUCCESS) { auto f = std::bind(&CuptiErrorManager::Unsubscribe, this, *subscriber); RegisterUndoFunction(f); } LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::Unsubscribe(CUpti_SubscriberHandle subscriber) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->Unsubscribe(subscriber); LOG_AND_DISABLE_IF_ERROR(error); return error; } void CuptiErrorManager::UndoAndDisable() { if (undo_disabled_) { return; } mutex_lock lock(undo_stack_mu_); undo_disabled_ = true; while (!undo_stack_.empty()) { LOG(ERROR) << "CuptiErrorManager is disabling profiling automatically."; undo_stack_.back()(); undo_stack_.pop_back(); } undo_disabled_ = false; disabled_ = 1; } CUptiResult CuptiErrorManager::GetResultString(CUptiResult result, const char** str) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->GetResultString(result, str); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::GetContextId(CUcontext context, uint32_t* context_id) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->GetContextId(context, context_id); LOG_AND_DISABLE_IF_ERROR(error); return error; } CUptiResult CuptiErrorManager::GetStreamIdEx(CUcontext context, CUstream stream, uint8_t per_thread_stream, uint32_t* stream_id) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->GetStreamIdEx(context, stream, per_thread_stream, stream_id); LOG_AND_DISABLE_IF_ERROR(error); return error; } void CuptiErrorManager::CleanUp() { if (undo_disabled_) { return; } mutex_lock lock(undo_stack_mu_); undo_disabled_ = true; while (!undo_stack_.empty()) { undo_stack_.pop_back(); } undo_disabled_ = false; } std::string CuptiErrorManager::ResultString(CUptiResult error) const { const char* error_message = nullptr; if (interface_->GetResultString(error, &error_message) == CUPTI_SUCCESS && error_message != nullptr) { return error_message; } return ""; } } }
#if GOOGLE_CUDA #include "xla/backends/profiler/gpu/cupti_error_manager.h" #include <cstdint> #include <memory> #include <utility> #include "absl/memory/memory.h" #include "xla/backends/profiler/gpu/cuda_test.h" #include "xla/backends/profiler/gpu/cupti_interface.h" #include "xla/backends/profiler/gpu/cupti_tracer.h" #include "xla/backends/profiler/gpu/cupti_wrapper.h" #include "xla/backends/profiler/gpu/mock_cupti.h" #include "tsl/platform/test.h" #include "tsl/profiler/utils/time_utils.h" namespace xla { namespace profiler { namespace test { using xla::profiler::CuptiInterface; using xla::profiler::CuptiTracer; using xla::profiler::CuptiTracerCollectorOptions; using xla::profiler::CuptiTracerOptions; using xla::profiler::CuptiWrapper; using ::testing::_; using ::testing::Invoke; using ::testing::Return; using ::testing::Sequence; using ::testing::StrictMock; class TestableCuptiTracer : public CuptiTracer { public: explicit TestableCuptiTracer(CuptiInterface* cupti_interface) : CuptiTracer(cupti_interface) {} }; class CuptiErrorManagerTest : public ::testing::Test { protected: CuptiErrorManagerTest() {} void SetUp() override { ASSERT_GT(CuptiTracer::NumGpus(), 0) << "No devices found"; auto mock_cupti = std::make_unique<StrictMock<MockCupti>>(); mock_ = mock_cupti.get(); cupti_error_manager_ = std::make_unique<CuptiErrorManager>(std::move(mock_cupti)); cupti_tracer_ = std::make_unique<TestableCuptiTracer>(cupti_error_manager_.get()); cupti_wrapper_ = std::make_unique<CuptiWrapper>(); CuptiTracerCollectorOptions collector_options; collector_options.num_gpus = CuptiTracer::NumGpus(); uint64_t start_gputime_ns = CuptiTracer::GetTimestamp(); uint64_t start_walltime_ns = tsl::profiler::GetCurrentTimeNanos(); cupti_collector_ = CreateCuptiCollector( collector_options, start_walltime_ns, start_gputime_ns); } void EnableProfiling(const CuptiTracerOptions& option) { cupti_tracer_->Enable(option, cupti_collector_.get()); } void DisableProfiling() { cupti_tracer_->Disable(); } bool CuptiDisabled() const { return cupti_error_manager_->Disabled(); } void RunGpuApp() { MemCopyH2D(); PrintfKernel(10); Synchronize(); MemCopyD2H(); } StrictMock<MockCupti>* mock_; std::unique_ptr<TestableCuptiTracer> cupti_tracer_ = nullptr; std::unique_ptr<CuptiInterface> cupti_error_manager_; std::unique_ptr<CuptiWrapper> cupti_wrapper_; std::unique_ptr<xla::profiler::CuptiTraceCollector> cupti_collector_; }; TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) { Sequence s1; EXPECT_CALL(*mock_, Subscribe(_, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe)); EXPECT_CALL(*mock_, EnableCallback(1, _, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback)); EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer()) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityUsePerThreadBuffer)); EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityRegisterCallbacks)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)) .InSequence(s1) .WillOnce(Return(CUPTI_ERROR_UNKNOWN)); EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString)); EXPECT_CALL(*mock_, EnableCallback(0, _, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback)); EXPECT_CALL(*mock_, Unsubscribe(_)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), 
&CuptiWrapper::Unsubscribe)); EXPECT_FALSE(CuptiDisabled()); CuptiTracerOptions options; options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL); options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel); EnableProfiling(options); EXPECT_TRUE(CuptiDisabled()); RunGpuApp(); EXPECT_TRUE(CuptiDisabled()); DisableProfiling(); EXPECT_TRUE(CuptiDisabled()); } TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) { EXPECT_FALSE(CuptiDisabled()); Sequence s1; EXPECT_CALL(*mock_, Subscribe(_, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe)); EXPECT_CALL(*mock_, EnableDomain(1, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain)); EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer()) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityUsePerThreadBuffer)); EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityRegisterCallbacks)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2)) .InSequence(s1) .WillOnce(Return(CUPTI_ERROR_UNKNOWN)); EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString)); EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable)); EXPECT_CALL(*mock_, EnableDomain(0, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain)); EXPECT_CALL(*mock_, Unsubscribe(_)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe)); EXPECT_FALSE(CuptiDisabled()); CuptiTracerOptions options; options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY); options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2); options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL); EnableProfiling(options); EXPECT_TRUE(CuptiDisabled()); RunGpuApp(); EXPECT_TRUE(CuptiDisabled()); DisableProfiling(); EXPECT_TRUE(CuptiDisabled()); } } } } #endif
CUptiResult CuptiErrorManager::Unsubscribe(CUpti_SubscriberHandle subscriber) { IGNORE_CALL_IF_DISABLED; CUptiResult error = interface_->Unsubscribe(subscriber); LOG_AND_DISABLE_IF_ERROR(error); return error; }
TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) { Sequence s1; EXPECT_CALL(*mock_, Subscribe(_, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe)); EXPECT_CALL(*mock_, EnableCallback(1, _, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback)); EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer()) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityUsePerThreadBuffer)); EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityRegisterCallbacks)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)) .InSequence(s1) .WillOnce(Return(CUPTI_ERROR_UNKNOWN)); EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString)); EXPECT_CALL(*mock_, EnableCallback(0, _, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback)); EXPECT_CALL(*mock_, Unsubscribe(_)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe)); EXPECT_FALSE(CuptiDisabled()); CuptiTracerOptions options; options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL); options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel); EnableProfiling(options); EXPECT_TRUE(CuptiDisabled()); RunGpuApp(); EXPECT_TRUE(CuptiDisabled()); DisableProfiling(); EXPECT_TRUE(CuptiDisabled()); } TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) { EXPECT_FALSE(CuptiDisabled()); Sequence s1; EXPECT_CALL(*mock_, Subscribe(_, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe)); EXPECT_CALL(*mock_, EnableDomain(1, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain)); EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer()) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityUsePerThreadBuffer)); EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityRegisterCallbacks)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable)); EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2)) .InSequence(s1) .WillOnce(Return(CUPTI_ERROR_UNKNOWN)); EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString)); EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable)); EXPECT_CALL(*mock_, EnableDomain(0, _, _)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain)); EXPECT_CALL(*mock_, Unsubscribe(_)) .InSequence(s1) .WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe)); EXPECT_FALSE(CuptiDisabled()); CuptiTracerOptions options; options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY); options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2); options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL); EnableProfiling(options); EXPECT_TRUE(CuptiDisabled()); RunGpuApp(); EXPECT_TRUE(CuptiDisabled()); DisableProfiling(); EXPECT_TRUE(CuptiDisabled()); }
#include "xla/python/ifrt/shape.h" #include <cstdint> #include <ostream> #include <string> #include <utility> #include <variant> #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "xla/python/ifrt/shape.pb.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace ifrt { namespace { template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; }; template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>; } absl::StatusOr<Shape> Shape::FromProto(const ShapeProto& proto) { Shape::Dimensions dims; dims.reserve(proto.dims_size()); for (int64_t dim : proto.dims()) { if (dim < 0) { return InvalidArgument( "Shape expects non-negative dimension sizes, but got %d", dim); } dims.push_back(dim); } return Shape(std::move(dims)); } ShapeProto Shape::ToProto() const { ShapeProto proto; proto.mutable_dims()->Reserve(dims().size()); for (int64_t dim : dims()) { proto.mutable_dims()->AddAlreadyReserved(dim); } return proto; } int64_t Shape::num_elements() const { int64_t count = 1; for (int64_t d : dims_) { count *= d; } return count; } std::string Shape::DebugString() const { return absl::StrCat("[", absl::StrJoin(dims_, ","), "]"); } absl::StatusOr<BoundedDynamicShapeTag> BoundedDynamicShapeTag::FromProto( const BoundedDynamicShapeTagProto& proto) { BoundedDynamicShapeTag::DynamicDimensions dynamic_dims; dynamic_dims.reserve(proto.is_dynamic_dims_size()); for (bool dynamic_dim : proto.is_dynamic_dims()) { dynamic_dims.push_back(dynamic_dim); } return BoundedDynamicShapeTag(std::move(dynamic_dims)); } BoundedDynamicShapeTagProto BoundedDynamicShapeTag::ToProto() const { BoundedDynamicShapeTagProto proto; proto.mutable_is_dynamic_dims()->Reserve(dynamic_dims_.size()); for (bool dynamic_dim : dynamic_dims_) { proto.mutable_is_dynamic_dims()->AddAlreadyReserved(dynamic_dim); } return proto; } absl::StatusOr<DynamicShape> DynamicShape::Create(Shape shape, DynamicShapeTag tag) { TF_RETURN_IF_ERROR(std::visit( overloaded{ [&](const BoundedDynamicShapeTag& tag) -> absl::Status { if (tag.DynamicDims().size() != shape.dims().size()) { return InvalidArgument( "Shape and tag must have the same number of dimensions."); } return absl::OkStatus(); }, }, tag)); return DynamicShape(std::move(shape), std::move(tag)); } absl::StatusOr<Shape> DynamicShape::GetPaddedShape() const { return std::visit( overloaded{ [this](BoundedDynamicShapeTag tag) { return shape_; }, }, tag_); } bool DynamicShape::IsDynamicDim(int dimension) const { return std::visit( overloaded{ [dimension](BoundedDynamicShapeTag tag) { return tag.DynamicDims().at(dimension); }, }, tag_); } absl::StatusOr<DynamicShape> DynamicShape::FromProto( const DynamicShapeProto& proto) { TF_ASSIGN_OR_RETURN(Shape shape, Shape::FromProto(proto.shape())); if (proto.has_bounded_dynamic_shape_tag()) { TF_ASSIGN_OR_RETURN( BoundedDynamicShapeTag tag, BoundedDynamicShapeTag::FromProto(proto.bounded_dynamic_shape_tag())); return DynamicShape::Create(std::move(shape), std::move(tag)); } return InvalidArgument("Only support bounded dynamic shape."); } DynamicShapeProto DynamicShape::ToProto() const { DynamicShapeProto proto; *proto.mutable_shape() = shape_.ToProto(); std::visit( overloaded{ [&proto](BoundedDynamicShapeTag tag) { *proto.mutable_bounded_dynamic_shape_tag() = tag.ToProto(); }, }, tag_); return proto; } std::string 
DynamicShape::DebugString() const { return std::visit( overloaded{[this](BoundedDynamicShapeTag tag) { absl::InlinedVector<std::string, Shape::kInlineDimensionSize> dim_reps; dim_reps.reserve(shape_.dims().size()); for (int i = 0; i < shape_.dims().size(); ++i) { absl::string_view prefix = tag.DynamicDims()[i] ? "<=" : ""; dim_reps.push_back(absl::StrCat(prefix, shape_.dims()[i])); } return absl::StrCat("[", absl::StrJoin(dim_reps, ","), "]"); }}, tag_); } std::ostream& operator<<(std::ostream& os, const Shape& shape) { return os << shape.DebugString(); } std::ostream& operator<<(std::ostream& os, const DynamicShape& dynamic_shape) { return os << dynamic_shape.DebugString(); } } }
#include "xla/python/ifrt/shape.h" #include <cstdint> #include <limits> #include <numeric> #include <sstream> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "xla/python/ifrt/shape.pb.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace ifrt { namespace { using ::testing::ElementsAre; using ::testing::ElementsAreArray; using ::testing::HasSubstr; using ::tsl::testing::StatusIs; TEST(ShapeTest, LargeDim) { Shape shape({std::numeric_limits<int64_t>::max()}); EXPECT_THAT(shape.dims(), ElementsAre(std::numeric_limits<int64_t>::max())); } TEST(ShapeTest, ManyDims) { const int kNumDims = 65536; std::vector<int64_t> dims(kNumDims); std::iota(dims.begin(), dims.end(), 0); Shape shape(dims); EXPECT_THAT(shape.dims(), ElementsAreArray(dims)); } TEST(ShapeTest, ScalarNumElements) { Shape shape({}); EXPECT_EQ(shape.num_elements(), 1); } TEST(ShapeTest, ZeroDimNumElements) { { Shape shape({0}); EXPECT_EQ(shape.num_elements(), 0); } { Shape shape({1, 0}); EXPECT_EQ(shape.num_elements(), 0); } { Shape shape({0, 1}); EXPECT_EQ(shape.num_elements(), 0); } { Shape shape({0, 0}); EXPECT_EQ(shape.num_elements(), 0); } } TEST(ShapeTest, NonZeroDimsNumElements) { { Shape shape({2}); EXPECT_EQ(shape.num_elements(), 2); } { Shape shape({2, 3}); EXPECT_EQ(shape.num_elements(), 6); } } TEST(ShapeTest, ToFromProto) { { Shape shape({}); ShapeProto proto = shape.ToProto(); TF_ASSERT_OK_AND_ASSIGN(Shape shape_copy, shape.FromProto(proto)); EXPECT_EQ(shape_copy, shape); } { Shape shape({1, 2}); ShapeProto proto = shape.ToProto(); TF_ASSERT_OK_AND_ASSIGN(Shape shape_copy, shape.FromProto(proto)); EXPECT_EQ(shape_copy, shape); } } TEST(BoundedDynamicShapeTagDeathTest, NoDynamicDim) { EXPECT_DEATH(BoundedDynamicShapeTag tag({false, false}), "At least one dimension needs to be dynamically sized"); } TEST(BoundedDynamicShapeTagTest, ToFromProto) { BoundedDynamicShapeTag tag({true, false}); BoundedDynamicShapeTagProto proto = tag.ToProto(); TF_ASSERT_OK_AND_ASSIGN(BoundedDynamicShapeTag tag_copy, tag.FromProto(proto)); EXPECT_EQ(tag_copy, tag); } TEST(DynamicShapeTest, SizeMismatch) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, true}); EXPECT_THAT(DynamicShape::Create(shape, tag), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("must have the same number of dimensions"))); } TEST(DynamicShapeTest, Equality) { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape1, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, false}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape2, DynamicShape::Create(Shape({3, 4}), BoundedDynamicShapeTag({true, false}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape3, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, true}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape4, DynamicShape::Create(Shape({2, 4, 3}), BoundedDynamicShapeTag({true, false, true}))); EXPECT_EQ(shape1, shape1); EXPECT_NE(shape1, shape2); EXPECT_NE(shape1, shape3); EXPECT_NE(shape1, shape4); } TEST(DynamicShapeTest, IsDynamicDim) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, false, true}); TF_ASSERT_OK_AND_ASSIGN(DynamicShape dynamic_shape, DynamicShape::Create(shape, tag)); EXPECT_TRUE(dynamic_shape.IsDynamicDim(0)); EXPECT_FALSE(dynamic_shape.IsDynamicDim(1)); EXPECT_TRUE(dynamic_shape.IsDynamicDim(2)); } TEST(DynamicShapeTest, GetPaddedShape) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, true, true}); TF_ASSERT_OK_AND_ASSIGN(DynamicShape 
dynamic_shape, DynamicShape::Create(shape, tag)); TF_ASSERT_OK_AND_ASSIGN(Shape padded_shape, dynamic_shape.GetPaddedShape()); EXPECT_EQ(padded_shape, shape); } TEST(DynamicShapeTest, ToFromProto) { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, false}))); DynamicShapeProto proto = shape.ToProto(); TF_ASSERT_OK_AND_ASSIGN(DynamicShape shape_copy, shape.FromProto(proto)); EXPECT_EQ(shape_copy, shape); } TEST(DynamicShapeTest, ToString) { { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, true}))); std::ostringstream output; output << shape; EXPECT_EQ(output.str(), "[<=2,<=4]"); } { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({false, true}))); std::ostringstream output; output << shape; EXPECT_EQ(output.str(), "[2,<=4]"); } } } } }
absl::StatusOr<DynamicShape> DynamicShape::Create(Shape shape, DynamicShapeTag tag) { TF_RETURN_IF_ERROR(std::visit( overloaded{ [&](const BoundedDynamicShapeTag& tag) -> absl::Status { if (tag.DynamicDims().size() != shape.dims().size()) { return InvalidArgument( "Shape and tag must have the same number of dimensions."); } return absl::OkStatus(); }, }, tag)); return DynamicShape(std::move(shape), std::move(tag)); }
TEST(DynamicShapeTest, SizeMismatch) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, true}); EXPECT_THAT(DynamicShape::Create(shape, tag), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("must have the same number of dimensions"))); } TEST(DynamicShapeTest, Equality) { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape1, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, false}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape2, DynamicShape::Create(Shape({3, 4}), BoundedDynamicShapeTag({true, false}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape3, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, true}))); TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape4, DynamicShape::Create(Shape({2, 4, 3}), BoundedDynamicShapeTag({true, false, true}))); EXPECT_EQ(shape1, shape1); EXPECT_NE(shape1, shape2); EXPECT_NE(shape1, shape3); EXPECT_NE(shape1, shape4); } TEST(DynamicShapeTest, IsDynamicDim) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, false, true}); TF_ASSERT_OK_AND_ASSIGN(DynamicShape dynamic_shape, DynamicShape::Create(shape, tag)); EXPECT_TRUE(dynamic_shape.IsDynamicDim(0)); EXPECT_FALSE(dynamic_shape.IsDynamicDim(1)); EXPECT_TRUE(dynamic_shape.IsDynamicDim(2)); } TEST(DynamicShapeTest, GetPaddedShape) { Shape shape({1, 2, 3}); BoundedDynamicShapeTag tag({true, true, true}); TF_ASSERT_OK_AND_ASSIGN(DynamicShape dynamic_shape, DynamicShape::Create(shape, tag)); TF_ASSERT_OK_AND_ASSIGN(Shape padded_shape, dynamic_shape.GetPaddedShape()); EXPECT_EQ(padded_shape, shape); } TEST(DynamicShapeTest, ToFromProto) { TF_ASSERT_OK_AND_ASSIGN( DynamicShape shape, DynamicShape::Create(Shape({2, 4}), BoundedDynamicShapeTag({true, false}))); DynamicShapeProto proto = shape.ToProto(); TF_ASSERT_OK_AND_ASSIGN(DynamicShape shape_copy, shape.FromProto(proto)); EXPECT_EQ(shape_copy, shape); }
#include "arolla/decision_forest/pointwise_evaluation/oblivious.h" #include <cstddef> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/log/check.h" #include "arolla/decision_forest/decision_forest.h" #include "arolla/decision_forest/split_condition.h" namespace arolla { namespace { bool IsPowerOf2(size_t x) { return (x & (x - 1)) == 0; } struct StackEntry { DecisionTreeNodeId node_id; int depth; }; template <typename CallbackFn> bool TraverseTree(const DecisionTree& tree, CallbackFn callback) { std::vector<StackEntry> stack; stack.reserve(32); stack.push_back(StackEntry{GetTreeRootId(tree), 0}); while (!stack.empty()) { auto [node_id, depth] = stack.back(); stack.pop_back(); if (!callback(node_id, depth)) { return false; } if (!node_id.is_leaf()) { const auto& node = tree.split_nodes[node_id.split_node_index()]; stack.push_back(StackEntry{node.child_if_true, depth + 1}); stack.push_back(StackEntry{node.child_if_false, depth + 1}); } } return true; } } std::optional<ObliviousDecisionTree> ToObliviousTree(const DecisionTree& tree) { size_t region_count = tree.adjustments.size(); if (!IsPowerOf2(region_count)) { return std::nullopt; } size_t depth = region_count ? __builtin_ctz(region_count) : 0; std::vector<std::shared_ptr<const SplitCondition>> layer_splits; layer_splits.reserve(depth); std::vector<float> adjustments; adjustments.reserve(region_count); auto process_node = [&](DecisionTreeNodeId node_id, int current_depth) { if (node_id.is_leaf()) { if (current_depth != depth) { return false; } adjustments.push_back(tree.adjustments[node_id.adjustment_index()] * tree.weight); } else { if (current_depth >= depth) { return false; } const auto& node = tree.split_nodes[node_id.split_node_index()]; if (layer_splits.size() == current_depth) { layer_splits.push_back(node.condition); } else { DCHECK_LT(current_depth, layer_splits.size()); if (*layer_splits[current_depth] != *node.condition) { return false; } } } return true; }; if (!TraverseTree(tree, process_node)) { return std::nullopt; } return ObliviousDecisionTree{tree.tag, std::move(layer_splits), std::move(adjustments)}; } }
#include "arolla/decision_forest/pointwise_evaluation/oblivious.h" #include <limits> #include <memory> #include <optional> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "arolla/decision_forest/decision_forest.h" #include "arolla/decision_forest/split_condition.h" #include "arolla/decision_forest/split_conditions/interval_split_condition.h" namespace arolla { namespace { using ::testing::ElementsAre; constexpr auto S = DecisionTreeNodeId::SplitNodeId; constexpr auto A = DecisionTreeNodeId::AdjustmentId; constexpr float inf = std::numeric_limits<float>::infinity(); std::shared_ptr<SplitCondition> Cond(int input_id, float left, float right) { return std::make_shared<IntervalSplitCondition>(input_id, left, right); } TEST(ObliviousTest, Errors) { { DecisionTree tree; tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } { DecisionTree tree; tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)}, {S(2), A(2), Cond(0, -1.0, inf)}, {A(1), A(3), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } { DecisionTree tree; tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}, {A(0), A(3), Cond(0, 1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } } TEST(ObliviousTest, Ok) { { DecisionTree tree; tree.adjustments = {2.0}; tree.weight = 0.5; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_THAT(oblivious_tree->layer_splits, ElementsAre()); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(1.0)); } { DecisionTree tree; tree.split_nodes = {{A(0), A(1), Cond(0, -inf, 1.0)}}; tree.adjustments = {7.0, 3.0}; tree.weight = 2.0; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_EQ(oblivious_tree->layer_splits.size(), 1); EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0)); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(14.0, 6.0)); } { DecisionTree tree; tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}, {A(0), A(3), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_EQ(oblivious_tree->layer_splits.size(), 2); EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0)); EXPECT_EQ(*oblivious_tree->layer_splits[1], *Cond(0, -1.0, inf)); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(0.0, 3.0, 1.0, 2.0)); } } } }
template <typename CallbackFn> bool TraverseTree(const DecisionTree& tree, CallbackFn callback) { std::vector<StackEntry> stack; stack.reserve(32); stack.push_back(StackEntry{GetTreeRootId(tree), 0}); while (!stack.empty()) { auto [node_id, depth] = stack.back(); stack.pop_back(); if (!callback(node_id, depth)) { return false; } if (!node_id.is_leaf()) { const auto& node = tree.split_nodes[node_id.split_node_index()]; stack.push_back(StackEntry{node.child_if_true, depth + 1}); stack.push_back(StackEntry{node.child_if_false, depth + 1}); } } return true; }
TEST(ObliviousTest, Errors) { { DecisionTree tree; tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } { DecisionTree tree; tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)}, {S(2), A(2), Cond(0, -1.0, inf)}, {A(1), A(3), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } { DecisionTree tree; tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}, {A(0), A(3), Cond(0, 1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; EXPECT_EQ(ToObliviousTree(tree), std::nullopt); } } TEST(ObliviousTest, Ok) { { DecisionTree tree; tree.adjustments = {2.0}; tree.weight = 0.5; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_THAT(oblivious_tree->layer_splits, ElementsAre()); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(1.0)); } { DecisionTree tree; tree.split_nodes = {{A(0), A(1), Cond(0, -inf, 1.0)}}; tree.adjustments = {7.0, 3.0}; tree.weight = 2.0; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_EQ(oblivious_tree->layer_splits.size(), 1); EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0)); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(14.0, 6.0)); } { DecisionTree tree; tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)}, {A(1), A(2), Cond(0, -1.0, inf)}, {A(0), A(3), Cond(0, -1.0, inf)}}; tree.adjustments = {0.0, 1.0, 2.0, 3.0}; auto oblivious_tree = ToObliviousTree(tree); ASSERT_TRUE(oblivious_tree.has_value()); EXPECT_EQ(oblivious_tree->layer_splits.size(), 2); EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0)); EXPECT_EQ(*oblivious_tree->layer_splits[1], *Cond(0, -1.0, inf)); EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(0.0, 3.0, 1.0, 2.0)); } }
#include "quiche/http2/test_tools/hpack_block_builder.h" #include "quiche/http2/hpack/varint/hpack_varint_encoder.h" #include "quiche/common/platform/api/quiche_bug_tracker.h" #include "quiche/common/platform/api/quiche_test.h" namespace http2 { namespace test { void HpackBlockBuilder::AppendHighBitsAndVarint(uint8_t high_bits, uint8_t prefix_length, uint64_t varint) { EXPECT_LE(3, prefix_length); EXPECT_LE(prefix_length, 8); HpackVarintEncoder::Encode(high_bits, prefix_length, varint, &buffer_); } void HpackBlockBuilder::AppendEntryTypeAndVarint(HpackEntryType entry_type, uint64_t varint) { uint8_t high_bits; uint8_t prefix_length; switch (entry_type) { case HpackEntryType::kIndexedHeader: high_bits = 0x80; prefix_length = 7; break; case HpackEntryType::kDynamicTableSizeUpdate: high_bits = 0x20; prefix_length = 5; break; case HpackEntryType::kIndexedLiteralHeader: high_bits = 0x40; prefix_length = 6; break; case HpackEntryType::kUnindexedLiteralHeader: high_bits = 0x00; prefix_length = 4; break; case HpackEntryType::kNeverIndexedLiteralHeader: high_bits = 0x10; prefix_length = 4; break; default: QUICHE_BUG(http2_bug_110_1) << "Unreached, entry_type=" << entry_type; high_bits = 0; prefix_length = 0; break; } AppendHighBitsAndVarint(high_bits, prefix_length, varint); } void HpackBlockBuilder::AppendString(bool is_huffman_encoded, absl::string_view str) { uint8_t high_bits = is_huffman_encoded ? 0x80 : 0; uint8_t prefix_length = 7; AppendHighBitsAndVarint(high_bits, prefix_length, str.size()); buffer_.append(str.data(), str.size()); } } }
#include "quiche/http2/test_tools/hpack_block_builder.h" #include <string> #include "absl/strings/escaping.h" #include "quiche/common/platform/api/quiche_test.h" namespace http2 { namespace test { namespace { const bool kUncompressed = false; const bool kCompressed = true; const uint32_t kStaticTableMethodGET = 2; const uint32_t kStaticTablePathSlash = 4; const uint32_t kStaticTableSchemeHttp = 6; TEST(HpackBlockBuilderTest, ExamplesFromSpecC2) { { HpackBlockBuilder b; b.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader, kUncompressed, "custom-key", kUncompressed, "custom-header"); EXPECT_EQ(26u, b.size()); const char kExpected[] = "\x40" "\x0a" "custom-key" "\x0d" "custom-header"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendNameIndexAndLiteralValue(HpackEntryType::kUnindexedLiteralHeader, 4, kUncompressed, "/sample/path"); EXPECT_EQ(14u, b.size()); const char kExpected[] = "\x04" "\x0c" "/sample/path"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader, kUncompressed, "password", kUncompressed, "secret"); EXPECT_EQ(17u, b.size()); const char kExpected[] = "\x10" "\x08" "password" "\x06" "secret"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendIndexedHeader(2); EXPECT_EQ(1u, b.size()); const char kExpected[] = "\x82"; EXPECT_EQ(kExpected, b.buffer()); } } TEST(HpackBlockBuilderTest, ExamplesFromSpecC3) { { HpackBlockBuilder b; b.AppendIndexedHeader(2); b.AppendIndexedHeader(6); b.AppendIndexedHeader(4); b.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 1, kUncompressed, "www.example.com"); EXPECT_EQ(20u, b.size()); std::string expected; ASSERT_TRUE(absl::HexStringToBytes( "828684410f7777772e6578616d706c652e636f6d", &expected)); EXPECT_EQ(expected, b.buffer()); } } TEST(HpackBlockBuilderTest, ExamplesFromSpecC4) { { HpackBlockBuilder b; b.AppendIndexedHeader(kStaticTableMethodGET); b.AppendIndexedHeader(kStaticTableSchemeHttp); b.AppendIndexedHeader(kStaticTablePathSlash); const char kHuffmanWwwExampleCom[] = {'\xf1', '\xe3', '\xc2', '\xe5', '\xf2', '\x3a', '\x6b', '\xa0', '\xab', '\x90', '\xf4', '\xff'}; b.AppendNameIndexAndLiteralValue( HpackEntryType::kIndexedLiteralHeader, 1, kCompressed, absl::string_view(kHuffmanWwwExampleCom, sizeof kHuffmanWwwExampleCom)); EXPECT_EQ(17u, b.size()); std::string expected; ASSERT_TRUE(absl::HexStringToBytes("828684418cf1e3c2e5f23a6ba0ab90f4ff", &expected)); EXPECT_EQ(expected, b.buffer()); } } TEST(HpackBlockBuilderTest, DynamicTableSizeUpdate) { { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(0); EXPECT_EQ(1u, b.size()); const char kData[] = {'\x20'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(4096); EXPECT_EQ(3u, b.size()); const char kData[] = {'\x3f', '\xe1', '\x1f'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(1000000000000); EXPECT_EQ(7u, b.size()); const char kData[] = {'\x3f', '\xe1', '\x9f', '\x94', '\xa5', '\x8d', '\x1d'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } } } } }
void HpackBlockBuilder::AppendEntryTypeAndVarint(HpackEntryType entry_type, uint64_t varint) { uint8_t high_bits; uint8_t prefix_length; switch (entry_type) { case HpackEntryType::kIndexedHeader: high_bits = 0x80; prefix_length = 7; break; case HpackEntryType::kDynamicTableSizeUpdate: high_bits = 0x20; prefix_length = 5; break; case HpackEntryType::kIndexedLiteralHeader: high_bits = 0x40; prefix_length = 6; break; case HpackEntryType::kUnindexedLiteralHeader: high_bits = 0x00; prefix_length = 4; break; case HpackEntryType::kNeverIndexedLiteralHeader: high_bits = 0x10; prefix_length = 4; break; default: QUICHE_BUG(http2_bug_110_1) << "Unreached, entry_type=" << entry_type; high_bits = 0; prefix_length = 0; break; } AppendHighBitsAndVarint(high_bits, prefix_length, varint); }
TEST(HpackBlockBuilderTest, ExamplesFromSpecC2) { { HpackBlockBuilder b; b.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader, kUncompressed, "custom-key", kUncompressed, "custom-header"); EXPECT_EQ(26u, b.size()); const char kExpected[] = "\x40" "\x0a" "custom-key" "\x0d" "custom-header"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendNameIndexAndLiteralValue(HpackEntryType::kUnindexedLiteralHeader, 4, kUncompressed, "/sample/path"); EXPECT_EQ(14u, b.size()); const char kExpected[] = "\x04" "\x0c" "/sample/path"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader, kUncompressed, "password", kUncompressed, "secret"); EXPECT_EQ(17u, b.size()); const char kExpected[] = "\x10" "\x08" "password" "\x06" "secret"; EXPECT_EQ(kExpected, b.buffer()); } { HpackBlockBuilder b; b.AppendIndexedHeader(2); EXPECT_EQ(1u, b.size()); const char kExpected[] = "\x82"; EXPECT_EQ(kExpected, b.buffer()); } } TEST(HpackBlockBuilderTest, ExamplesFromSpecC3) { { HpackBlockBuilder b; b.AppendIndexedHeader(2); b.AppendIndexedHeader(6); b.AppendIndexedHeader(4); b.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 1, kUncompressed, "www.example.com"); EXPECT_EQ(20u, b.size()); std::string expected; ASSERT_TRUE(absl::HexStringToBytes( "828684410f7777772e6578616d706c652e636f6d", &expected)); EXPECT_EQ(expected, b.buffer()); } } TEST(HpackBlockBuilderTest, ExamplesFromSpecC4) { { HpackBlockBuilder b; b.AppendIndexedHeader(kStaticTableMethodGET); b.AppendIndexedHeader(kStaticTableSchemeHttp); b.AppendIndexedHeader(kStaticTablePathSlash); const char kHuffmanWwwExampleCom[] = {'\xf1', '\xe3', '\xc2', '\xe5', '\xf2', '\x3a', '\x6b', '\xa0', '\xab', '\x90', '\xf4', '\xff'}; b.AppendNameIndexAndLiteralValue( HpackEntryType::kIndexedLiteralHeader, 1, kCompressed, absl::string_view(kHuffmanWwwExampleCom, sizeof kHuffmanWwwExampleCom)); EXPECT_EQ(17u, b.size()); std::string expected; ASSERT_TRUE(absl::HexStringToBytes("828684418cf1e3c2e5f23a6ba0ab90f4ff", &expected)); EXPECT_EQ(expected, b.buffer()); } } TEST(HpackBlockBuilderTest, DynamicTableSizeUpdate) { { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(0); EXPECT_EQ(1u, b.size()); const char kData[] = {'\x20'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(4096); EXPECT_EQ(3u, b.size()); const char kData[] = {'\x3f', '\xe1', '\x1f'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } { HpackBlockBuilder b; b.AppendDynamicTableSizeUpdate(1000000000000); EXPECT_EQ(7u, b.size()); const char kData[] = {'\x3f', '\xe1', '\x9f', '\x94', '\xa5', '\x8d', '\x1d'}; absl::string_view expected(kData, sizeof kData); EXPECT_EQ(expected, b.buffer()); } }
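The byte values asserted above follow directly from HPACK's prefixed-integer encoding (RFC 7541, Section 5.1). Below is a minimal sketch of that encoding for illustration only; the function name is hypothetical and HpackVarintEncoder is the real implementation. A value below the prefix mask fits in one byte; otherwise the prefix saturates and the remainder continues in little-endian 7-bit groups with a continuation bit.

#include <cstdint>
#include <string>

// Illustrative prefixed-integer encoder (RFC 7541, Section 5.1).
std::string EncodePrefixedInt(uint8_t high_bits, uint8_t prefix_length,
                              uint64_t value) {
  std::string out;
  const uint64_t prefix_mask = (1u << prefix_length) - 1;
  if (value < prefix_mask) {
    // Fits entirely in the prefix bits of the first byte.
    out.push_back(static_cast<char>(high_bits | value));
    return out;
  }
  // Saturate the prefix, then emit the remainder in 7-bit groups, least
  // significant first, with the high bit of each byte as a continuation flag.
  out.push_back(static_cast<char>(high_bits | prefix_mask));
  value -= prefix_mask;
  while (value >= 128) {
    out.push_back(static_cast<char>(0x80 | (value & 0x7f)));
    value >>= 7;
  }
  out.push_back(static_cast<char>(value));
  return out;
}

// EncodePrefixedInt(0x80, 7, 2)    == "\x82"          (indexed header 2)
// EncodePrefixedInt(0x20, 5, 4096) == "\x3f\xe1\x1f"  (table size update)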
#include "eval/public/ast_rewrite.h" #include <stack> #include <vector> #include "google/api/expr/v1alpha1/syntax.pb.h" #include "absl/log/absl_log.h" #include "absl/types/variant.h" #include "eval/public/ast_visitor.h" #include "eval/public/source_position.h" namespace google::api::expr::runtime { using google::api::expr::v1alpha1::Expr; using google::api::expr::v1alpha1::SourceInfo; using Ident = google::api::expr::v1alpha1::Expr::Ident; using Select = google::api::expr::v1alpha1::Expr::Select; using Call = google::api::expr::v1alpha1::Expr::Call; using CreateList = google::api::expr::v1alpha1::Expr::CreateList; using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct; using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension; namespace { struct ArgRecord { Expr* expr; const SourceInfo* source_info; const Expr* calling_expr; int call_arg; }; struct ComprehensionRecord { Expr* expr; const SourceInfo* source_info; const Comprehension* comprehension; const Expr* comprehension_expr; ComprehensionArg comprehension_arg; bool use_comprehension_callbacks; }; struct ExprRecord { Expr* expr; const SourceInfo* source_info; }; using StackRecordKind = absl::variant<ExprRecord, ArgRecord, ComprehensionRecord>; struct StackRecord { public: ABSL_ATTRIBUTE_UNUSED static constexpr int kNotCallArg = -1; static constexpr int kTarget = -2; StackRecord(Expr* e, const SourceInfo* info) { ExprRecord record; record.expr = e; record.source_info = info; record_variant = record; } StackRecord(Expr* e, const SourceInfo* info, Comprehension* comprehension, Expr* comprehension_expr, ComprehensionArg comprehension_arg, bool use_comprehension_callbacks) { if (use_comprehension_callbacks) { ComprehensionRecord record; record.expr = e; record.source_info = info; record.comprehension = comprehension; record.comprehension_expr = comprehension_expr; record.comprehension_arg = comprehension_arg; record.use_comprehension_callbacks = use_comprehension_callbacks; record_variant = record; return; } ArgRecord record; record.expr = e; record.source_info = info; record.calling_expr = comprehension_expr; record.call_arg = comprehension_arg; record_variant = record; } StackRecord(Expr* e, const SourceInfo* info, const Expr* call, int argnum) { ArgRecord record; record.expr = e; record.source_info = info; record.calling_expr = call; record.call_arg = argnum; record_variant = record; } Expr* expr() const { return absl::get<ExprRecord>(record_variant).expr; } const SourceInfo* source_info() const { return absl::get<ExprRecord>(record_variant).source_info; } bool IsExprRecord() const { return absl::holds_alternative<ExprRecord>(record_variant); } StackRecordKind record_variant; bool visited = false; }; struct PreVisitor { void operator()(const ExprRecord& record) { Expr* expr = record.expr; const SourcePosition position(expr->id(), record.source_info); visitor->PreVisitExpr(expr, &position); switch (expr->expr_kind_case()) { case Expr::kSelectExpr: visitor->PreVisitSelect(&expr->select_expr(), expr, &position); break; case Expr::kCallExpr: visitor->PreVisitCall(&expr->call_expr(), expr, &position); break; case Expr::kComprehensionExpr: visitor->PreVisitComprehension(&expr->comprehension_expr(), expr, &position); break; default: break; } } void operator()(const ArgRecord&) {} void operator()(const ComprehensionRecord& record) { Expr* expr = record.expr; const SourcePosition position(expr->id(), record.source_info); visitor->PreVisitComprehensionSubexpression( expr, record.comprehension, record.comprehension_arg, 
&position); } AstVisitor* visitor; }; void PreVisit(const StackRecord& record, AstVisitor* visitor) { absl::visit(PreVisitor{visitor}, record.record_variant); } struct PostVisitor { void operator()(const ExprRecord& record) { Expr* expr = record.expr; const SourcePosition position(expr->id(), record.source_info); switch (expr->expr_kind_case()) { case Expr::kConstExpr: visitor->PostVisitConst(&expr->const_expr(), expr, &position); break; case Expr::kIdentExpr: visitor->PostVisitIdent(&expr->ident_expr(), expr, &position); break; case Expr::kSelectExpr: visitor->PostVisitSelect(&expr->select_expr(), expr, &position); break; case Expr::kCallExpr: visitor->PostVisitCall(&expr->call_expr(), expr, &position); break; case Expr::kListExpr: visitor->PostVisitCreateList(&expr->list_expr(), expr, &position); break; case Expr::kStructExpr: visitor->PostVisitCreateStruct(&expr->struct_expr(), expr, &position); break; case Expr::kComprehensionExpr: visitor->PostVisitComprehension(&expr->comprehension_expr(), expr, &position); break; case Expr::EXPR_KIND_NOT_SET: break; default: ABSL_LOG(ERROR) << "Unsupported Expr kind: " << expr->expr_kind_case(); } visitor->PostVisitExpr(expr, &position); } void operator()(const ArgRecord& record) { Expr* expr = record.expr; const SourcePosition position(expr->id(), record.source_info); if (record.call_arg == StackRecord::kTarget) { visitor->PostVisitTarget(record.calling_expr, &position); } else { visitor->PostVisitArg(record.call_arg, record.calling_expr, &position); } } void operator()(const ComprehensionRecord& record) { Expr* expr = record.expr; const SourcePosition position(expr->id(), record.source_info); visitor->PostVisitComprehensionSubexpression( expr, record.comprehension, record.comprehension_arg, &position); } AstVisitor* visitor; }; void PostVisit(const StackRecord& record, AstVisitor* visitor) { absl::visit(PostVisitor{visitor}, record.record_variant); } void PushSelectDeps(Select* select_expr, const SourceInfo* source_info, std::stack<StackRecord>* stack) { if (select_expr->has_operand()) { stack->push(StackRecord(select_expr->mutable_operand(), source_info)); } } void PushCallDeps(Call* call_expr, Expr* expr, const SourceInfo* source_info, std::stack<StackRecord>* stack) { const int arg_size = call_expr->args_size(); for (int i = arg_size - 1; i >= 0; --i) { stack->push(StackRecord(call_expr->mutable_args(i), source_info, expr, i)); } if (call_expr->has_target()) { stack->push(StackRecord(call_expr->mutable_target(), source_info, expr, StackRecord::kTarget)); } } void PushListDeps(CreateList* list_expr, const SourceInfo* source_info, std::stack<StackRecord>* stack) { auto& elements = *list_expr->mutable_elements(); for (auto it = elements.rbegin(); it != elements.rend(); ++it) { auto& element = *it; stack->push(StackRecord(&element, source_info)); } } void PushStructDeps(CreateStruct* struct_expr, const SourceInfo* source_info, std::stack<StackRecord>* stack) { auto& entries = *struct_expr->mutable_entries(); for (auto it = entries.rbegin(); it != entries.rend(); ++it) { auto& entry = *it; if (entry.has_value()) { stack->push(StackRecord(entry.mutable_value(), source_info)); } if (entry.has_map_key()) { stack->push(StackRecord(entry.mutable_map_key(), source_info)); } } } void PushComprehensionDeps(Comprehension* c, Expr* expr, const SourceInfo* source_info, std::stack<StackRecord>* stack, bool use_comprehension_callbacks) { StackRecord iter_range(c->mutable_iter_range(), source_info, c, expr, ITER_RANGE, use_comprehension_callbacks); StackRecord 
accu_init(c->mutable_accu_init(), source_info, c, expr, ACCU_INIT, use_comprehension_callbacks); StackRecord loop_condition(c->mutable_loop_condition(), source_info, c, expr, LOOP_CONDITION, use_comprehension_callbacks); StackRecord loop_step(c->mutable_loop_step(), source_info, c, expr, LOOP_STEP, use_comprehension_callbacks); StackRecord result(c->mutable_result(), source_info, c, expr, RESULT, use_comprehension_callbacks); stack->push(result); stack->push(loop_step); stack->push(loop_condition); stack->push(accu_init); stack->push(iter_range); } struct PushDepsVisitor { void operator()(const ExprRecord& record) { Expr* expr = record.expr; switch (expr->expr_kind_case()) { case Expr::kSelectExpr: PushSelectDeps(expr->mutable_select_expr(), record.source_info, &stack); break; case Expr::kCallExpr: PushCallDeps(expr->mutable_call_expr(), expr, record.source_info, &stack); break; case Expr::kListExpr: PushListDeps(expr->mutable_list_expr(), record.source_info, &stack); break; case Expr::kStructExpr: PushStructDeps(expr->mutable_struct_expr(), record.source_info, &stack); break; case Expr::kComprehensionExpr: PushComprehensionDeps(expr->mutable_comprehension_expr(), expr, record.source_info, &stack, options.use_comprehension_callbacks); break; default: break; } } void operator()(const ArgRecord& record) { stack.push(StackRecord(record.expr, record.source_info)); } void operator()(const ComprehensionRecord& record) { stack.push(StackRecord(record.expr, record.source_info)); } std::stack<StackRecord>& stack; const RewriteTraversalOptions& options; }; void PushDependencies(const StackRecord& record, std::stack<StackRecord>& stack, const RewriteTraversalOptions& options) { absl::visit(PushDepsVisitor{stack, options}, record.record_variant); } } bool AstRewrite(Expr* expr, const SourceInfo* source_info, AstRewriter* visitor) { return AstRewrite(expr, source_info, visitor, RewriteTraversalOptions{}); } bool AstRewrite(Expr* expr, const SourceInfo* source_info, AstRewriter* visitor, RewriteTraversalOptions options) { std::stack<StackRecord> stack; std::vector<const Expr*> traversal_path; stack.push(StackRecord(expr, source_info)); bool rewritten = false; while (!stack.empty()) { StackRecord& record = stack.top(); if (!record.visited) { if (record.IsExprRecord()) { traversal_path.push_back(record.expr()); visitor->TraversalStackUpdate(absl::MakeSpan(traversal_path)); SourcePosition pos(record.expr()->id(), record.source_info()); if (visitor->PreVisitRewrite(record.expr(), &pos)) { rewritten = true; } } PreVisit(record, visitor); PushDependencies(record, stack, options); record.visited = true; } else { PostVisit(record, visitor); if (record.IsExprRecord()) { SourcePosition pos(record.expr()->id(), record.source_info()); if (visitor->PostVisitRewrite(record.expr(), &pos)) { rewritten = true; } traversal_path.pop_back(); visitor->TraversalStackUpdate(absl::MakeSpan(traversal_path)); } stack.pop(); } } return rewritten; } }
#include "eval/public/ast_rewrite.h" #include <string> #include <vector> #include "google/api/expr/v1alpha1/syntax.pb.h" #include "eval/public/ast_visitor.h" #include "eval/public/source_position.h" #include "internal/testing.h" #include "parser/parser.h" #include "testutil/util.h" namespace google::api::expr::runtime { namespace { using ::google::api::expr::v1alpha1::Constant; using ::google::api::expr::v1alpha1::Expr; using ::google::api::expr::v1alpha1::ParsedExpr; using ::google::api::expr::v1alpha1::SourceInfo; using testing::_; using testing::ElementsAre; using testing::InSequence; using Ident = google::api::expr::v1alpha1::Expr::Ident; using Select = google::api::expr::v1alpha1::Expr::Select; using Call = google::api::expr::v1alpha1::Expr::Call; using CreateList = google::api::expr::v1alpha1::Expr::CreateList; using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct; using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension; class MockAstRewriter : public AstRewriter { public: MOCK_METHOD(void, PreVisitExpr, (const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitExpr, (const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitConst, (const Constant* const_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitIdent, (const Ident* ident_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PreVisitSelect, (const Select* select_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitSelect, (const Select* select_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PreVisitCall, (const Call* call_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitCall, (const Call* call_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PreVisitComprehension, (const Comprehension* comprehension_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitComprehension, (const Comprehension* comprehension_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PreVisitComprehensionSubexpression, (const Expr* expr, const Comprehension* comprehension_expr, ComprehensionArg comprehension_arg, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitComprehensionSubexpression, (const Expr* expr, const Comprehension* comprehension_expr, ComprehensionArg comprehension_arg, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitTarget, (const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitArg, (int arg_num, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitCreateList, (const CreateList* list_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(void, PostVisitCreateStruct, (const CreateStruct* struct_expr, const Expr* expr, const SourcePosition* position), (override)); MOCK_METHOD(bool, PreVisitRewrite, (Expr * expr, const SourcePosition* position), (override)); MOCK_METHOD(bool, PostVisitRewrite, (Expr * expr, const SourcePosition* position), (override)); MOCK_METHOD(void, TraversalStackUpdate, (absl::Span<const Expr*> path), (override)); }; TEST(AstCrawlerTest, CheckCrawlConstant) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto const_expr = 
expr.mutable_const_expr(); EXPECT_CALL(handler, PostVisitConst(const_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlIdent) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto ident_expr = expr.mutable_ident_expr(); EXPECT_CALL(handler, PostVisitIdent(ident_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlSelectNotCrashingPostVisitAbsentOperand) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto select_expr = expr.mutable_select_expr(); EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlSelect) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto select_expr = expr.mutable_select_expr(); auto operand = select_expr->mutable_operand(); auto ident_expr = operand->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitIdent(ident_expr, operand, _)).Times(1); EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlCallNoReceiver) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto* call_expr = expr.mutable_call_expr(); Expr* arg0 = call_expr->add_args(); auto* const_expr = arg0->mutable_const_expr(); Expr* arg1 = call_expr->add_args(); auto* ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitTarget(_, _)).Times(0); EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlCallReceiver) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto* call_expr = expr.mutable_call_expr(); Expr* target = call_expr->mutable_target(); auto* target_ident = target->mutable_ident_expr(); Expr* arg0 = call_expr->add_args(); auto* const_expr = arg0->mutable_const_expr(); Expr* arg1 = call_expr->add_args(); auto* ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(target_ident, target, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(target, _)).Times(1); EXPECT_CALL(handler, PostVisitTarget(&expr, _)).Times(1); EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlComprehension) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto c = expr.mutable_comprehension_expr(); auto iter_range = 
c->mutable_iter_range(); auto iter_range_expr = iter_range->mutable_const_expr(); auto accu_init = c->mutable_accu_init(); auto accu_init_expr = accu_init->mutable_ident_expr(); auto loop_condition = c->mutable_loop_condition(); auto loop_condition_expr = loop_condition->mutable_const_expr(); auto loop_step = c->mutable_loop_step(); auto loop_step_expr = loop_step->mutable_ident_expr(); auto result = c->mutable_result(); auto result_expr = result->mutable_const_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1); EXPECT_CALL(handler, PreVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _)) .Times(1); EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1); EXPECT_CALL(handler, PostVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _)) .Times(1); EXPECT_CALL(handler, PreVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _)) .Times(1); EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1); EXPECT_CALL(handler, PostVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _)) .Times(1); EXPECT_CALL(handler, PreVisitComprehensionSubexpression(loop_condition, c, LOOP_CONDITION, _)) .Times(1); EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _)) .Times(1); EXPECT_CALL(handler, PostVisitComprehensionSubexpression(loop_condition, c, LOOP_CONDITION, _)) .Times(1); EXPECT_CALL(handler, PreVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _)) .Times(1); EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1); EXPECT_CALL(handler, PostVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _)) .Times(1); EXPECT_CALL(handler, PreVisitComprehensionSubexpression(result, c, RESULT, _)) .Times(1); EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1); EXPECT_CALL(handler, PostVisitComprehensionSubexpression(result, c, RESULT, _)) .Times(1); EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1); RewriteTraversalOptions opts; opts.use_comprehension_callbacks = true; AstRewrite(&expr, &source_info, &handler, opts); } TEST(AstCrawlerTest, CheckCrawlComprehensionLegacyCallbacks) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto c = expr.mutable_comprehension_expr(); auto iter_range = c->mutable_iter_range(); auto iter_range_expr = iter_range->mutable_const_expr(); auto accu_init = c->mutable_accu_init(); auto accu_init_expr = accu_init->mutable_ident_expr(); auto loop_condition = c->mutable_loop_condition(); auto loop_condition_expr = loop_condition->mutable_const_expr(); auto loop_step = c->mutable_loop_step(); auto loop_step_expr = loop_step->mutable_ident_expr(); auto result = c->mutable_result(); auto result_expr = result->mutable_const_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(ITER_RANGE, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(ACCU_INIT, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _)) .Times(1); EXPECT_CALL(handler, PostVisitArg(LOOP_CONDITION, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(LOOP_STEP, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1); 
EXPECT_CALL(handler, PostVisitArg(RESULT, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCreateList) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto list_expr = expr.mutable_list_expr(); auto arg0 = list_expr->add_elements(); auto const_expr = arg0->mutable_const_expr(); auto arg1 = list_expr->add_elements(); auto ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitCreateList(list_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCreateStruct) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto struct_expr = expr.mutable_struct_expr(); auto entry0 = struct_expr->add_entries(); auto key = entry0->mutable_map_key()->mutable_const_expr(); auto value = entry0->mutable_value()->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitConst(key, &entry0->map_key(), _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(value, &entry0->value(), _)).Times(1); EXPECT_CALL(handler, PostVisitCreateStruct(struct_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckExprHandlers) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto struct_expr = expr.mutable_struct_expr(); auto entry0 = struct_expr->add_entries(); entry0->mutable_map_key()->mutable_const_expr(); entry0->mutable_value()->mutable_ident_expr(); EXPECT_CALL(handler, PreVisitExpr(_, _)).Times(3); EXPECT_CALL(handler, PostVisitExpr(_, _)).Times(3); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckExprRewriteHandlers) { SourceInfo source_info; MockAstRewriter handler; Expr select_expr; select_expr.mutable_select_expr()->set_field("var"); auto* inner_select_expr = select_expr.mutable_select_expr()->mutable_operand(); inner_select_expr->mutable_select_expr()->set_field("mid"); auto* ident = inner_select_expr->mutable_select_expr()->mutable_operand(); ident->mutable_ident_expr()->set_name("top"); { InSequence sequence; EXPECT_CALL(handler, TraversalStackUpdate(testing::ElementsAre(&select_expr))); EXPECT_CALL(handler, PreVisitRewrite(&select_expr, _)); EXPECT_CALL(handler, TraversalStackUpdate(testing::ElementsAre( &select_expr, inner_select_expr))); EXPECT_CALL(handler, PreVisitRewrite(inner_select_expr, _)); EXPECT_CALL(handler, TraversalStackUpdate(testing::ElementsAre( &select_expr, inner_select_expr, ident))); EXPECT_CALL(handler, PreVisitRewrite(ident, _)); EXPECT_CALL(handler, PostVisitRewrite(ident, _)); EXPECT_CALL(handler, TraversalStackUpdate(testing::ElementsAre( &select_expr, inner_select_expr))); EXPECT_CALL(handler, PostVisitRewrite(inner_select_expr, _)); EXPECT_CALL(handler, TraversalStackUpdate(testing::ElementsAre(&select_expr))); EXPECT_CALL(handler, PostVisitRewrite(&select_expr, _)); EXPECT_CALL(handler, TraversalStackUpdate(testing::IsEmpty())); } EXPECT_FALSE(AstRewrite(&select_expr, &source_info, &handler)); } class RewriterExample : public AstRewriterBase { public: RewriterExample() {} bool PostVisitRewrite(Expr* expr, const SourcePosition* info) override { if (target_.has_value() && expr->id() == *target_) { expr->mutable_ident_expr()->set_name("com.google.Identifier"); return true; } return false; } void PostVisitIdent(const Ident* ident, const Expr* expr, 
const SourcePosition* pos) override { if (path_.size() >= 3) { if (ident->name() == "com") { const Expr* p1 = path_.at(path_.size() - 2); const Expr* p2 = path_.at(path_.size() - 3); if (p1->has_select_expr() && p1->select_expr().field() == "google" && p2->has_select_expr() && p2->select_expr().field() == "Identifier") { target_ = p2->id(); } } } } void TraversalStackUpdate(absl::Span<const Expr*> path) override { path_ = path; } private: absl::Span<const Expr*> path_; absl::optional<int64_t> target_; }; TEST(AstRewrite, SelectRewriteExample) { ASSERT_OK_AND_ASSIGN(ParsedExpr parsed, parser::Parse("com.google.Identifier")); RewriterExample example; ASSERT_TRUE( AstRewrite(parsed.mutable_expr(), &parsed.source_info(), &example)); EXPECT_THAT(parsed.expr(), testutil::EqualsProto(R"pb( id: 3 ident_expr { name: "com.google.Identifier" } )pb")); } class PreRewriterExample : public AstRewriterBase { public: PreRewriterExample() {} bool PreVisitRewrite(Expr* expr, const SourcePosition* info) override { if (expr->ident_expr().name() == "x") { expr->mutable_ident_expr()->set_name("y"); return true; } return false; } bool PostVisitRewrite(Expr* expr, const SourcePosition* info) override { if (expr->ident_expr().name() == "y") { expr->mutable_ident_expr()->set_name("z"); return true; } return false; } void PostVisitIdent(const Ident* ident, const Expr* expr, const SourcePosition* pos) override { visited_idents_.push_back(ident->name()); } const std::vector<std::string>& visited_idents() const { return visited_idents_; } private: std::vector<std::string> visited_idents_; }; TEST(AstRewrite, PreAndPostVisitExample) { ASSERT_OK_AND_ASSIGN(ParsedExpr parsed, parser::Parse("x")); PreRewriterExample visitor; ASSERT_TRUE( AstRewrite(parsed.mutable_expr(), &parsed.source_info(), &visitor)); EXPECT_THAT(parsed.expr(), testutil::EqualsProto(R"pb( id: 1 ident_expr { name: "z" } )pb")); EXPECT_THAT(visitor.visited_idents(), ElementsAre("y")); } } }
void PostVisit(const StackRecord& record, AstVisitor* visitor) { absl::visit(PostVisitor{visitor}, record.record_variant); }
TEST(AstCrawlerTest, CheckCrawlConstant) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto const_expr = expr.mutable_const_expr(); EXPECT_CALL(handler, PostVisitConst(const_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlIdent) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto ident_expr = expr.mutable_ident_expr(); EXPECT_CALL(handler, PostVisitIdent(ident_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlSelectNotCrashingPostVisitAbsentOperand) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto select_expr = expr.mutable_select_expr(); EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlSelect) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto select_expr = expr.mutable_select_expr(); auto operand = select_expr->mutable_operand(); auto ident_expr = operand->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitIdent(ident_expr, operand, _)).Times(1); EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlCallNoReceiver) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto* call_expr = expr.mutable_call_expr(); Expr* arg0 = call_expr->add_args(); auto* const_expr = arg0->mutable_const_expr(); Expr* arg1 = call_expr->add_args(); auto* ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitTarget(_, _)).Times(0); EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCrawlCallReceiver) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto* call_expr = expr.mutable_call_expr(); Expr* target = call_expr->mutable_target(); auto* target_ident = target->mutable_ident_expr(); Expr* arg0 = call_expr->add_args(); auto* const_expr = arg0->mutable_const_expr(); Expr* arg1 = call_expr->add_args(); auto* ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(target_ident, target, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(target, _)).Times(1); EXPECT_CALL(handler, PostVisitTarget(&expr, _)).Times(1); EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1); EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCreateList) { SourceInfo source_info; 
MockAstRewriter handler; Expr expr; auto list_expr = expr.mutable_list_expr(); auto arg0 = list_expr->add_elements(); auto const_expr = arg0->mutable_const_expr(); auto arg1 = list_expr->add_elements(); auto ident_expr = arg1->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1); EXPECT_CALL(handler, PostVisitCreateList(list_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); } TEST(AstCrawlerTest, CheckCreateStruct) { SourceInfo source_info; MockAstRewriter handler; Expr expr; auto struct_expr = expr.mutable_struct_expr(); auto entry0 = struct_expr->add_entries(); auto key = entry0->mutable_map_key()->mutable_const_expr(); auto value = entry0->mutable_value()->mutable_ident_expr(); testing::InSequence seq; EXPECT_CALL(handler, PostVisitConst(key, &entry0->map_key(), _)).Times(1); EXPECT_CALL(handler, PostVisitIdent(value, &entry0->value(), _)).Times(1); EXPECT_CALL(handler, PostVisitCreateStruct(struct_expr, &expr, _)).Times(1); AstRewrite(&expr, &source_info, &handler); }
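AstRewrite above linearizes a recursive pre/post traversal using an explicit stack and a visited flag: a record is pre-visited the first time it reaches the top of the stack and post-visited when it surfaces again after its children. A stripped-down sketch of that pattern on a hypothetical Node type (illustration only, not the CEL API):

#include <stack>
#include <vector>

struct Node { std::vector<Node*> children; };

template <typename Pre, typename Post>
void Traverse(Node* root, Pre pre, Post post) {
  struct Entry { Node* node; bool visited = false; };
  std::stack<Entry> stack;
  stack.push({root});
  while (!stack.empty()) {
    Entry& entry = stack.top();
    if (!entry.visited) {
      // First encounter: pre-visit, then queue the children.
      entry.visited = true;
      pre(entry.node);
      Node* node = entry.node;
      // Push children in reverse so they are popped left-to-right.
      for (auto it = node->children.rbegin(); it != node->children.rend();
           ++it) {
        stack.push({*it});
      }
    } else {
      // Second encounter: all children done, post-visit and discard.
      post(entry.node);
      stack.pop();
    }
  }
}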
#include "xla/service/gpu/split_k_gemm_rewriter.h" #include <cmath> #include <cstdint> #include <iterator> #include <stack> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout.h" #include "xla/literal_util.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/gpu/triton_support.h" #include "xla/service/gpu/triton_tiling_propagation.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span, const int64_t divisor) { CHECK_GE(divisor, 1); int64_t product = 1; for (auto it = span.crbegin(); it != span.crend(); ++it) { product *= *it; if (product % divisor == 0) { return true; } if (divisor % product != 0) { return false; } } return false; } namespace { void CopyIncrementingAboveThreshold( const tsl::protobuf::RepeatedField<int64_t>& source, tsl::protobuf::RepeatedField<int64_t>& destination, const int threshold) { destination.Reserve(source.size()); for (int64_t x : source) { if (x >= threshold) { ++x; } destination.Add(x); } } void CopyIncrementingAboveThreshold(absl::Span<const int64_t> source, DimensionVector& destination, const int threshold) { destination.reserve(source.size()); for (int64_t x : source) { if (x >= threshold) { ++x; } destination.push_back(x); } } absl::Status UncompilableMatmul(absl::string_view explanation) { absl::Status s = absl::CancelledError(explanation); s.SetPayload(kUncompilableFusion, absl::Cord(explanation)); return s; } absl::StatusOr<HloInstruction*> MakeSparseMetaOperand( HloDotInstruction& dot, const TritonGemmConfig& config) { CHECK_EQ(dot.sparse_operands(), 1); CHECK_EQ(dot.sparsity().front().index(), 0); HloInstruction* meta = dot.mutable_operand(2); const Shape& shape = meta->shape(); if (shape.dimensions().back() % config.split_k != 0) { return UncompilableMatmul("Sparsity metadata has incorrect shape."); } std::vector<int64_t> dimensions(shape.dimensions().begin(), shape.dimensions().end() - 1); dimensions.push_back(config.split_k); dimensions.push_back(shape.dimensions().back() / config.split_k); Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout( shape.element_type(), dimensions); return MakeBitcastHlo(meta, new_shape); } } absl::StatusOr<HloInstruction*> MakeSplitKOperand( HloInstruction& dot, const TritonFusionAnalysis& analysis, const TritonGemmConfig& config, const int64_t contracting_dim_idx, const int operand_number) { HloInstruction* operand = dot.mutable_operand(operand_number); const int64_t k = operand->shape().dimensions(contracting_dim_idx); const bool need_padding = k % config.split_k != 0; TritonFusionAnalysis::Scope scope = (operand_number == 0) ? 
TritonFusionAnalysis::Scope::LHS : TritonFusionAnalysis::Scope::RHS; auto check_if_supported = [&](const HloInstruction& hlo, bool check_divisibility) { const TensorIterationSpec::DimIterationSpec* spec = analysis.IterSpec(scope, &hlo, contracting_dim_idx); if (spec == nullptr) { return absl::OkStatus(); } if (spec->size() != 1) { return UncompilableMatmul("Unsupported case."); } const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0); if (fragment.is_sliced()) { return UncompilableMatmul( "Sliced contracting dimension is not supported yet."); } if (check_divisibility && !HasDivisibleSuffixAllowingSplit( fragment.subfragments, config.split_k)) { return UncompilableMatmul("Contracting dimension is too fragmented."); } if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) { return UncompilableMatmul( "Too small divisible part of the contracting dimension."); } return absl::OkStatus(); }; TF_RETURN_IF_ERROR( check_if_supported(*operand, !need_padding)); for (const HloInstruction* param : analysis.ScopeParameters(scope)) { TF_RETURN_IF_ERROR( check_if_supported(*param, !need_padding)); } if (need_padding) { HloInstruction* const zero = dot.parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(operand->shape().element_type()))); PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank()); padding_config.mutable_dimensions(contracting_dim_idx) ->set_edge_padding_high(config.split_k - k % config.split_k); TF_ASSIGN_OR_RETURN(HloInstruction * pad, MakePadHlo(operand, zero, padding_config)); *pad->mutable_shape()->mutable_layout() = operand->shape().layout(); operand = pad; } CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k); const Shape& shape = operand->shape(); Shape new_shape(shape.element_type(), {}, {}, {}); for (int i = 0; i < shape.rank(); ++i) { const int64_t dimension_size = shape.dimensions(i); if (i == contracting_dim_idx) { new_shape.add_dimensions(config.split_k); new_shape.add_dimensions(dimension_size / config.split_k); } else { new_shape.add_dimensions(dimension_size); } } Layout* new_layout = new_shape.mutable_layout(); for (int64_t logical_dim_idx : shape.layout().minor_to_major()) { if (logical_dim_idx >= contracting_dim_idx) { new_layout->add_minor_to_major(logical_dim_idx + 1); } if (logical_dim_idx <= contracting_dim_idx) { new_layout->add_minor_to_major(logical_dim_idx); } } return MakeBitcastHlo(operand, new_shape); } absl::Status MakeDotComputationSplitKBatch( HloComputation* computation, const TritonGemmConfig& config, bool disable_reduced_precision_reduction) { HloDotInstruction* dot = Cast<HloDotInstruction>( hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot)); TF_ASSIGN_OR_RETURN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers(); DotDimensionNumbers new_dim_numbers; TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx, ContractingDimensionIndex(*dot, 0)); CopyIncrementingAboveThreshold( old_dim_numbers.lhs_contracting_dimensions(), *new_dim_numbers.mutable_lhs_contracting_dimensions(), lhs_contracting_idx); new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx); CopyIncrementingAboveThreshold( old_dim_numbers.lhs_batch_dimensions(), *new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx); TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx, ContractingDimensionIndex(*dot, 1)); CopyIncrementingAboveThreshold( 
old_dim_numbers.rhs_contracting_dimensions(), *new_dim_numbers.mutable_rhs_contracting_dimensions(), rhs_contracting_idx); new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx); CopyIncrementingAboveThreshold( old_dim_numbers.rhs_batch_dimensions(), *new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx); if (dot->sparse_operands()) { if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) { return UncompilableMatmul("Sparsity is only supported on left operand."); } } std::stack<HloInstruction*> to_process; absl::flat_hash_set<HloInstruction*> to_process_set; HloInstruction* current = dot; do { to_process.push(current); CHECK(to_process_set.insert(current).second); if (current->users().empty()) { break; } CHECK_EQ(current->user_count(), 1); current = current->users()[0]; if (!legacy_triton::IsDistributiveOverAddition(*current)) { return Cancelled("Operation non-distributive over addition after dot."); } } while (true); bool did_pad = false; while (!to_process.empty()) { HloInstruction* current = to_process.top(); to_process.pop(); HloInstruction* expanded; if (current == dot) { TF_ASSIGN_OR_RETURN( HloInstruction * lhs, MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0)); TF_ASSIGN_OR_RETURN( HloInstruction * rhs, MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1)); if (lhs->operand(0)->opcode() == HloOpcode::kPad) { CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad); did_pad = true; } std::vector<SparsityDescriptor> sparsity(dot->sparsity().begin(), dot->sparsity().end()); std::vector<HloInstruction*> sparse_meta(sparsity.size()); for (int i = 0; i < sparsity.size(); ++i) { sparsity[i].set_dimension(sparsity[i].dimension() + 1); TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeSparseMetaOperand(*dot, config)); } expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(), dot->shape().element_type(), sparsity, sparse_meta) .value(); expanded->mutable_shape()->mutable_layout()->clear_minor_to_major(); CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(), *expanded->mutable_shape() ->mutable_layout() ->mutable_minor_to_major(), 0); expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0); dot->SetupDerivedInstruction(expanded); } else { expanded = computation->AddInstruction(current->CloneWithNewShape( ShapeUtil::PrependMajorDimension(config.split_k, current->shape()))); if (expanded->opcode() == HloOpcode::kTranspose) { const auto* old_transpose = Cast<HloTransposeInstruction>(current); auto* new_transpose = Cast<HloTransposeInstruction>(expanded); new_transpose->mutable_dimensions()->clear(); new_transpose->mutable_dimensions()->reserve( new_transpose->shape().rank()); new_transpose->mutable_dimensions()->push_back(0); for (const int64_t dim : old_transpose->dimensions()) { new_transpose->mutable_dimensions()->push_back(dim + 1); } } } TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(current)); if (current == dot) { continue; } for (int i = 0; i < expanded->operands().size(); ++i) { HloInstruction* operand = expanded->mutable_operand(i); if (!to_process_set.contains(operand)) { std::vector<int64_t> broadcast_dimensions(operand->shape().rank()); absl::c_iota(broadcast_dimensions, 1); TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape( i, MakeBroadcastHlo(operand, broadcast_dimensions, ShapeUtil::PrependMajorDimension( config.split_k, operand->shape())))); } } } if 
(disable_reduced_precision_reduction) { PrimitiveType output_type = computation->root_instruction()->shape().element_type(); PrimitiveType accumulator_type = output_type == PrimitiveType::F64 ? PrimitiveType::F64 : PrimitiveType::F32; computation->root_instruction()->mutable_shape()->set_element_type( accumulator_type); } if (did_pad) { TF_RETURN_IF_ERROR( TritonFusionAnalysis::Execute(*computation, config.split_k).status()); } return absl::OkStatus(); } absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion, const TritonGemmConfig& config) { CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion); if (dot_fusion->shape().IsTuple()) { return Unimplemented("Tuple output is not supported with split-K yet."); } const bool disable_reduced_precision_reduction = dot_fusion->GetModule() ->config() .debug_options() .xla_gpu_triton_gemm_disable_reduced_precision_reduction(); const PrimitiveType output_type = dot_fusion->shape().element_type(); const Layout output_layout = dot_fusion->shape().layout(); TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch( dot_fusion->fused_instructions_computation(), config, disable_reduced_precision_reduction)); const HloInstruction* root = dot_fusion->fused_expression_root(); *dot_fusion->mutable_shape() = root->shape(); HloInstruction* zero = dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(root->shape().element_type()))); TF_ASSIGN_OR_RETURN(HloInstruction * reduce, MakeReduceHlo(dot_fusion, zero, {0}, HloOpcode::kAdd, &dot_fusion->metadata())); *reduce->mutable_shape()->mutable_layout() = output_layout; if (dot_fusion->IsRoot()) { dot_fusion->parent()->set_root_instruction(reduce, true); } else { TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce)); } if (disable_reduced_precision_reduction) { HloInstruction* convert = MakeConvertToHlo(reduce, output_type); if (reduce->IsRoot()) { reduce->parent()->set_root_instruction(convert, true); } else { TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert)); } } return absl::OkStatus(); } } }
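For intuition about the rewrite above: MakeSplitKOperand reshapes the contracting dimension K into (split_k, K / split_k), zero-padding K when it is not divisible, and split_k then becomes an extra batch dimension that the final reduce sums away. A toy sketch of just the shape bookkeeping, under the simplifying assumption of plain dimension vectors (hypothetical helper, not part of the XLA API):

#include <cstdint>
#include <vector>

// Toy shape bookkeeping for split-K: the contracting dimension K becomes
// (split_k, padded_K / split_k), mirroring the edge_padding_high logic in
// MakeSplitKOperand.
std::vector<int64_t> SplitKShape(const std::vector<int64_t>& dims,
                                 int contracting_idx, int64_t split_k) {
  const int64_t k = dims[contracting_idx];
  const int64_t padded_k = (k + split_k - 1) / split_k * split_k;  // round up
  std::vector<int64_t> result(dims.begin(), dims.begin() + contracting_idx);
  result.push_back(split_k);
  result.push_back(padded_k / split_k);
  result.insert(result.end(), dims.begin() + contracting_idx + 1, dims.end());
  return result;
}

// SplitKShape({480, 129}, 1, 4) == {480, 4, 33}: 129 is padded to 132,
// matching the "indivisible K" tests that rely on zero padding.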
#include "xla/service/gpu/split_k_gemm_rewriter.h" #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/hlo_verifier.h" #include "xla/service/layout_assignment.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::FieldsAre; namespace m = ::xla::match; TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) { EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2)); } using SplitKTest = HloTestBase; TEST_F(SplitKTest, MakeSplitK) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0) bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0) copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1) reshape.5 = s8[480,128]{1,0} reshape(copy.1) convert.8 = bf16[480,128]{1,0} convert(reshape.5) parameter_1 = bf16[16,128]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,128,5,32]{3,2,1,0} parameter(0) p1 = bf16[16,128]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm", metadata={op_name="foo"} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->opcode(), HloOpcode::kReduce); EXPECT_EQ(root->metadata().op_name(), "foo"); } TEST_F(SplitKTest, MakeSplitKWithOutputFusion) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) d = f16[480,16]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} c = bf16[] constant(123) n = bf16[] negate(c) bc = bf16[480,16]{1,0} broadcast(n) cv = bf16[480,16]{1,0} convert(d) ROOT a = bf16[480,16]{1,0} multiply(bc, cv) } ENTRY e { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} 
fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kReduce); } TEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) d = f16[480,16]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} c = f32[480,16]{1,0} convert(d) ROOT s = f32[480,16]{1,0} tanh(c) } ENTRY e { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) ROOT fusion = f32[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); EXPECT_THAT(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config), tsl::testing::StatusIs( tsl::error::CANCELLED, absl::StrFormat( "Operation non-distributive over addition after dot."))); } TEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) { constexpr absl::string_view kHloText = R"( t { c1 = s32[] constant(1) bc1 = s32[31]{0} broadcast(c1), dimensions={} p0 = s32[31]{0} parameter(0) cmp = pred[31]{0} compare(bc1, p0), direction=EQ cvt = f32[31]{0} convert(cmp) bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1} c0 = f32[] constant(0) bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={} ROOT dot = f32[31,16]{1,0} dot(bc2, bc0), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = s32[31]{0} parameter(0) ROOT r = f32[31,16]{1,0} fusion(p0), kind=kCustom, calls=t, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 2, 1, 2); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) { const std::string hlo_text = R"( t { p0 = f16[32,1234] parameter(0) s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]} p1 = f16[256,768] parameter(1) ROOT d = f16[32,768] dot(s0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f16[32,1234] parameter(0) p1 = f16[256,768] parameter(1) ROOT r = f16[32,768] fusion(p0, p1), kind=kCustom, calls=t, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 2, 1, 2); EXPECT_THAT(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config), tsl::testing::StatusIs( tsl::error::CANCELLED, absl::StrFormat( "Sliced contracting dimension is not supported yet."))); } TEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) { const std::string kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0) bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0) copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1) reshape.5 = s8[480,128]{1,0} reshape(copy.1) convert.8 = bf16[480,128]{1,0} convert(reshape.5) parameter_1 = bf16[16,128]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, 
parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,128,5,32]{3,2,1,0} parameter(0) p1 = bf16[16,128]{1,0} parameter(1) ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kReduce); EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(), Layout({0, 1})); } TEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) { const std::string hlo_text = R"( HloModule m triton_gemm_dot.24 { parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1) bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1) convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3) parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0) bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0) ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2), lhs_batch_dims={1}, lhs_contracting_dims={0}, rhs_batch_dims={0}, rhs_contracting_dims={2} } ENTRY e { tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0) tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1) ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0), kind=kCustom, calls=triton_gemm_dot.24, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(32, 64, 64, 8, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kReduce); } TEST_F(SplitKTest, SupportsIndivisible) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0) bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0) copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1) reshape.5 = s8[480,129]{1,0} reshape(copy.1) convert.8 = bf16[480,129]{1,0} convert(reshape.5) parameter_1 = bf16[16,129]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,129,5,32]{3,2,1,0} parameter(0) p1 = bf16[16,129]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[480,129]{1,0} parameter(0) convert_0 = bf16[480,129]{1,0} convert(parameter_0) parameter_1 = bf16[16,129]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[480,129]{1,0} parameter(0) p1 = bf16[16,129]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 
16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[480,129]{0,1} parameter(0) convert_0 = bf16[480,129]{0,1} convert(parameter_0) parameter_1 = bf16[16,129]{0,1} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[480,129]{0,1} parameter(0) p1 = bf16[16,129]{0,1} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), kConfig)); TF_EXPECT_OK(HloVerifier(true, true, LayoutAssignment::InstructionCanChangeLayout) .Run(module.get()) .status()); } TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[480,255]{1,0} parameter(0) convert_0 = bf16[480,255]{1,0} convert(parameter_0) parameter_1 = bf16[16,255]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[480,255]{1,0} parameter(0) p1 = bf16[16,255]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 16, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SupportsIndivisibleWithTranspose) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[480,255]{1,0} parameter(0) convert_0 = bf16[480,255]{1,0} convert(parameter_0) transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0} parameter_1 = bf16[16,255]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1), lhs_contracting_dims={0}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[480,255]{1,0} parameter(0) p1 = bf16[16,255]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 16, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SupportIndivisibleWithBroadcast) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[] parameter(0) convert_0 = bf16[] convert(parameter_0) broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0) parameter_1 = bf16[16,255]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[] parameter(0) p1 = bf16[16,255]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 
16, 16, 16, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SupportsIndivisibleWithBitcast) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0) convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0) bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0) parameter_1 = bf16[16,255]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,5,480,17]{3,0,1,2} parameter(0) p1 = bf16[16,255]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 16, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); } TEST_F(SplitKTest, SkipSmallK) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,64,5,32]{3,2,1,0} parameter(0) bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0) copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1) reshape.5 = s8[480,64]{1,0} reshape(copy.1) convert.8 = bf16[480,64]{1,0} convert(reshape.5) parameter_1 = bf16[16,64]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,64,5,32]{3,2,1,0} parameter(0) p1 = bf16[16,64]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 128, 4, 1, 4); EXPECT_THAT(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config), tsl::testing::StatusIs( tsl::error::CANCELLED, "Too small divisible part of the contracting dimension.")); } TEST_F(SplitKTest, FragmentedKSupported) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { p0 = f16[7,2,16,4,20] parameter(0) t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0} b0 = f16[2560,7] bitcast(t0) a1 = f16[2560,5] parameter(1) ROOT r = f16[7,5] dot(b0, a1), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = f16[7,2,16,4,20] parameter(0) p1 = f16[2560,5] parameter(1) ROOT fusion = f16[7,5] fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(32, 32, 16, 1, 1, 4); config.split_k = 5; EXPECT_THAT( MakeDotSplitKBatch(module->entry_computation()->root_instruction(), config), tsl::testing::StatusIs(tsl::error::CANCELLED, "Contracting dimension is too fragmented.")); config.split_k = 8; TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->opcode(), HloOpcode::kReduce); const HloComputation* dot_computation = module->entry_computation() ->root_instruction() ->operand(0) ->called_computations()[0]; const HloInstruction* p0 = dot_computation->parameter_instruction(0); TF_ASSERT_OK_AND_ASSIGN( const auto analysis, TritonFusionAnalysis::Execute(*dot_computation, config.split_k)); 
EXPECT_EQ(dot_computation->root_instruction()->shape(), ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5})); EXPECT_THAT( *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1), ElementsAre(FieldsAre(1, 2560, 0, 2560, ElementsAre(20, 4, 4, 4, 2)))); } TEST_F(SplitKTest, FragmentedKUnsupported) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { p0 = f32[3,128,77] parameter(0) b0 = f32[384,77] bitcast(p0) a1 = f32[384,25] parameter(1) ROOT r = f32[77,25] dot(b0, a1), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[3,128,77] parameter(0) p1 = f32[384,25] parameter(1) ROOT fusion = f32[77,25] fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); EXPECT_THAT( MakeDotSplitKBatch(module->entry_computation()->root_instruction(), config), tsl::testing::StatusIs(tsl::error::CANCELLED, "Contracting dimension is too fragmented.")); } TEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) { const std::string kHloText = R"( triton_gemm_dot.4842_computation { parameter_0 = bf16[96,96]{1,0} parameter(0) parameter_1 = bf16[96,7]{1,0} parameter(1) dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT bitcast.2 = bf16[7,3,32]{2,1,0} bitcast(dot.0) } ENTRY e { parameter_0.91 = bf16[96,96]{1,0} parameter(0) parameter_1.86 = bf16[96,7]{1,0} parameter(1) ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0} fusion(parameter_0.91, parameter_1.86), kind=kCustom, calls=triton_gemm_dot.4842_computation })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 2, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kReduce); const HloComputation* dot_computation = module->entry_computation() ->root_instruction() ->operand(0) ->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*dot_computation)); } TEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) { const std::string hlo_text = R"( HloModule test triton_gemm { lhs = f16[2,5,1600] parameter(0) rhs = f16[2,3200,10] parameter(1) meta = u16[2,5,200] parameter(2) ROOT dot = f32[2,5,10] dot(lhs, rhs, meta), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=L.2@2:4 } ENTRY e { lhs = f16[2,5,1600] parameter(0) rhs = f16[2,3200,10] parameter(1) meta = u16[2,5,200] parameter(2) ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta), kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 1); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->opcode(), HloOpcode::kReduce); HloInstruction* dot = module->GetComputationWithName("triton_gemm")->root_instruction(); EXPECT_EQ(dot->operand(0)->shape(), ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400})); EXPECT_EQ(dot->operand(1)->shape(), ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10})); 
EXPECT_EQ(dot->operand(2)->shape(), ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50})); } TEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) { const std::string hlo_text = R"( HloModule test triton_gemm { lhs = f16[2,5,3200] parameter(0) rhs = f16[2,1600,10] parameter(1) meta = u16[2,200,10] parameter(2) ROOT dot = f32[2,5,10] dot(lhs, rhs, meta), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=R.1@2:4 } ENTRY e { lhs = f16[2,5,3200] parameter(0) rhs = f16[2,1600,10] parameter(1) meta = u16[2,200,10] parameter(2) ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta), kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 1); auto result = MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config); EXPECT_FALSE(result.ok()); } class SplitKTestWithMorePreciseReduction : public HloTestBase, public ::testing::WithParamInterface<int> { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction( true); return debug_options; } }; TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) { constexpr absl::string_view kHloText = R"( HloModule t triton_gemm_dot { parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0) bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0) copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1) reshape.5 = s8[480,128]{1,0} reshape(copy.1) convert.8 = bf16[480,128]{1,0} convert(reshape.5) parameter_1 = bf16[16,128]{1,0} parameter(1) ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[3,128,5,32]{3,2,1,0} parameter(0) p1 = bf16[16,128]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant())))); } TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) { const std::string hlo_text = R"( HloModule t triton_gemm_dot { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) d = f16[480,16]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} c = bf16[] constant(123) n = bf16[] negate(c) bc = bf16[480,16]{1,0} broadcast(n) cv = bf16[480,16]{1,0} convert(d) ROOT a = bf16[480,16]{1,0} multiply(bc, cv) } ENTRY e { p0 = f16[480,128]{1,0} parameter(0) p1 = f16[16,128]{1,0} parameter(1) ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 16, 16, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant())))); } TEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) { const std::string 
hlo_text = R"( triton_gemm_dot { p0 = f16[8,288,288]{2,1,0} parameter(0) p1 = f16[8,288,32]{2,0,1} parameter(1) d = f16[8,288,32]{2,1,0} dot(p0, p1), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1} ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2} } ENTRY e { p0 = f16[8,288,288]{2,1,0} parameter(0) p1 = f16[8,288,32]{2,0,1} parameter(1) ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 128, 32, 8, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); const auto* transpose = Cast<HloTransposeInstruction>(module->entry_computation() ->root_instruction() ->operand(0) ->fused_instructions_computation() ->root_instruction()); EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3)); } TEST_F(SplitKTest, MakeSplitKWithTrivialDimension) { const std::string hlo_text = R"( triton_gemm_dot { parameter_0 = f32[1001,1]{1,0} parameter(0) parameter_1 = f32[1001,2048]{1,0} parameter(1) ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY %entry_computation { p0 = f32[1001,1]{1,0} parameter(0) p1 = f32[1001,2048]{1,0} parameter(1) ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_gemm_dot })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TritonGemmConfig config(16, 128, 64, 4, 1, 4); TF_EXPECT_OK(MakeDotSplitKBatch( module->entry_computation()->root_instruction(), config)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Reduce(m::Fusion(), m::Constant()))); } } } }
#include "xla/service/gpu/split_k_gemm_rewriter.h" #include <cmath> #include <cstdint> #include <iterator> #include <stack> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout.h" #include "xla/literal_util.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/gpu/triton_support.h" #include "xla/service/gpu/triton_tiling_propagation.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span, const int64_t divisor) { CHECK_GE(divisor, 1); int64_t product = 1; for (auto it = span.crbegin(); it != span.crend(); ++it) { product *= *it; if (product % divisor == 0) { return true; } if (divisor % product != 0) { return false; } } return false; }
TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) { EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6)); EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2)); EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2)); }
#include "tensorflow/compiler/mlir/register_common_dialects.h" #include "mlir/Dialect/Quant/QuantOps.h" #include "mlir/Dialect/Shape/IR/Shape.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Tosa/IR/TosaOps.h" #include "mlir/InitAllDialects.h" #include "mlir/InitAllExtensions.h" #include "stablehlo/dialect/Register.h" #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" #include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" #include "tensorflow/compiler/mlir/tensorflow/utils/mlprogram_util.h" #include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h" #include "xla/mlir/framework/ir/xla_framework.h" #include "xla/mlir_hlo/mhlo/IR/register.h" namespace mlir { void RegisterCommonToolingDialects(mlir::DialectRegistry& registry) { mlir::RegisterAllTensorFlowDialects(registry); mlir::mhlo::registerAllMhloDialects(registry); mlir::registerAllDialects(registry); mlir::registerAllExtensions(registry); mlir::stablehlo::registerAllDialects(registry); registry.insert<mlir::TFL::TensorFlowLiteDialect>(); registry.insert<mlir::kernel_gen::tf_framework::TFFrameworkDialect>(); registry.insert<mlir::quant::QuantizationDialect>(); registry.insert<mlir::quantfork::QuantizationForkDialect>(); registry.insert<mlir::shape::ShapeDialect>(); registry.insert<mlir::tensor::TensorDialect>(); registry.insert<mlir::tosa::TosaDialect>(); registry.insert<mlir::xla_framework::XLAFrameworkDialect, mlir::TF::TensorFlowDialect, mlir::tf_type::TFTypeDialect>(); } };
#include "tensorflow/compiler/mlir/register_common_dialects.h" #include <gtest/gtest.h> #include "mlir/IR/DialectRegistry.h" namespace mlir { namespace { TEST(RegisterCommonDialectsTest, DoesntCrash) { mlir::DialectRegistry registry; mlir::RegisterCommonToolingDialects(registry); EXPECT_FALSE(registry.getDialectNames().empty()); } } }
#include "tensorflow/compiler/mlir/register_common_dialects.h" #include "mlir/Dialect/Quant/QuantOps.h" #include "mlir/Dialect/Shape/IR/Shape.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Tosa/IR/TosaOps.h" #include "mlir/InitAllDialects.h" #include "mlir/InitAllExtensions.h" #include "stablehlo/dialect/Register.h" #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" #include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" #include "tensorflow/compiler/mlir/tensorflow/utils/mlprogram_util.h" #include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h" #include "xla/mlir/framework/ir/xla_framework.h" #include "xla/mlir_hlo/mhlo/IR/register.h" namespace mlir { void RegisterCommonToolingDialects(mlir::DialectRegistry& registry) { mlir::RegisterAllTensorFlowDialects(registry); mlir::mhlo::registerAllMhloDialects(registry); mlir::registerAllDialects(registry); mlir::registerAllExtensions(registry); mlir::stablehlo::registerAllDialects(registry); registry.insert<mlir::TFL::TensorFlowLiteDialect>(); registry.insert<mlir::kernel_gen::tf_framework::TFFrameworkDialect>(); registry.insert<mlir::quant::QuantizationDialect>(); registry.insert<mlir::quantfork::QuantizationForkDialect>(); registry.insert<mlir::shape::ShapeDialect>(); registry.insert<mlir::tensor::TensorDialect>(); registry.insert<mlir::tosa::TosaDialect>(); registry.insert<mlir::xla_framework::XLAFrameworkDialect, mlir::TF::TensorFlowDialect, mlir::tf_type::TFTypeDialect>(); }
#include "tensorflow/compiler/mlir/register_common_dialects.h" #include <gtest/gtest.h> #include "mlir/IR/DialectRegistry.h" namespace mlir { namespace { TEST(RegisterCommonDialectsTest, DoesntCrash) { mlir::DialectRegistry registry; mlir::RegisterCommonToolingDialects(registry); EXPECT_FALSE(registry.getDialectNames().empty()); }
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/dataset_stateful_op_allowlist.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeAndType; using shape_inference::ShapeHandle; namespace { Status TwoElementVectorInputsAndScalarOutputs(InferenceContext* c) { ShapeHandle handle; DimensionHandle unused_handle; for (int i = 0; i < c->num_inputs(); ++i) { TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle)); TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle)); } for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->Scalar()); } return absl::OkStatus(); } Status ScalarAndTwoElementVectorInputsAndScalarOutputs(InferenceContext* c) { ShapeHandle handle; DimensionHandle unused_handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); for (int i = 1; i < c->num_inputs(); ++i) { TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle)); TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle)); } for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->Scalar()); } return absl::OkStatus(); } Status TwoElementOutput(InferenceContext* c) { c->set_output(0, c->Vector(2)); return absl::OkStatus(); } Status ScalarOutput(InferenceContext* c) { c->set_output(0, c->Scalar()); return absl::OkStatus(); } } REGISTER_OP("LookupTableFind") .Input("table_handle: Ref(string)") .Input("keys: Tin") .Input("default_value: Tout") .Output("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(2), 1, &unused)); c->set_output(0, c->UnknownShape()); return absl::OkStatus(); }); Status ValidateTableType(InferenceContext* c, const ShapeAndType& key_shape_and_type, const string& key_dtype_attr, const ShapeAndType& value_shape_and_type, const string& value_dtype_attr) { DataType key_dtype; TF_RETURN_IF_ERROR(c->GetAttr(key_dtype_attr, &key_dtype)); if (key_shape_and_type.dtype != key_dtype) { return errors::InvalidArgument( "Trying to read value with wrong dtype. " "Expected ", DataTypeString(key_shape_and_type.dtype), " got ", DataTypeString(key_dtype)); } DataType value_dtype; TF_RETURN_IF_ERROR(c->GetAttr(value_dtype_attr, &value_dtype)); if (value_shape_and_type.dtype != value_dtype) { return errors::InvalidArgument( "Trying to read value with wrong dtype. 
" "Expected ", DataTypeString(value_shape_and_type.dtype), " got ", DataTypeString(value_dtype)); } return absl::OkStatus(); } Status ValidateTableResourceHandle(InferenceContext* c, ShapeHandle keys, const string& key_dtype_attr, const string& value_dtype_attr, ShapeAndType* output_shape_and_type) { auto* handle_data = c->input_handle_shapes_and_types(0); if (handle_data == nullptr || handle_data->size() != 2) { output_shape_and_type->shape = c->UnknownShape(); output_shape_and_type->dtype = DT_INVALID; } else { const ShapeAndType& key_shape_and_type = (*handle_data)[0]; const ShapeAndType& value_shape_and_type = (*handle_data)[1]; TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type, key_dtype_attr, value_shape_and_type, value_dtype_attr)); output_shape_and_type->dtype = value_shape_and_type.dtype; if (c->RankKnown(key_shape_and_type.shape) && c->RankKnown(keys)) { int keys_rank = c->Rank(keys); int key_suffix_rank = c->Rank(key_shape_and_type.shape); if (keys_rank < key_suffix_rank) { return errors::InvalidArgument( "Expected keys to have suffix ", c->DebugString(key_shape_and_type.shape), " but saw shape: ", c->DebugString(keys)); } for (int d = 0; d < key_suffix_rank; d++) { DimensionHandle dim = c->Dim(key_shape_and_type.shape, d); TF_RETURN_IF_ERROR( c->ReplaceDim(keys, keys_rank - key_suffix_rank + d, dim, &keys)); } std::vector<DimensionHandle> keys_prefix_vec; keys_prefix_vec.reserve(keys_rank - key_suffix_rank); for (int d = 0; d < keys_rank - key_suffix_rank; ++d) { keys_prefix_vec.push_back(c->Dim(keys, d)); } ShapeHandle keys_prefix = c->MakeShape(keys_prefix_vec); TF_RETURN_IF_ERROR(c->Concatenate(keys_prefix, value_shape_and_type.shape, &output_shape_and_type->shape)); } else { output_shape_and_type->shape = c->UnknownShape(); } } return absl::OkStatus(); } REGISTER_OP("LookupTableFindV2") .Input("table_handle: resource") .Input("keys: Tin") .Input("default_value: Tout") .Output("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); ShapeAndType value_shape_and_type; TF_RETURN_IF_ERROR(ValidateTableResourceHandle( c, c->input(1), "Tin", "Tout", &value_shape_and_type)); c->set_output(0, value_shape_and_type.shape); return absl::OkStatus(); }); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableFindV2"); REGISTER_OP("LookupTableInsert") .Input("table_handle: Ref(string)") .Input("keys: Tin") .Input("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); return absl::OkStatus(); }); REGISTER_OP("LookupTableInsertV2") .Input("table_handle: resource") .Input("keys: Tin") .Input("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); return absl::OkStatus(); }); REGISTER_OP("LookupTableRemoveV2") .Input("table_handle: resource") .Input("keys: Tin") .Attr("Tin: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &handle)); return absl::OkStatus(); }); REGISTER_OP("LookupTableSize") .Input("table_handle: Ref(string)") .Output("size: int64") .SetShapeFn(TwoElementVectorInputsAndScalarOutputs); 
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSize"); REGISTER_OP("LookupTableSizeV2") .Input("table_handle: resource") .Output("size: int64") .SetShapeFn(ScalarAndTwoElementVectorInputsAndScalarOutputs); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSizeV2"); REGISTER_OP("LookupTableExport") .Input("table_handle: Ref(string)") .Output("keys: Tkeys") .Output("values: Tvalues") .Attr("Tkeys: type") .Attr("Tvalues: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); ShapeHandle values = c->UnknownShape(); TF_RETURN_IF_ERROR(c->WithRankAtLeast(values, 1, &values)); ShapeHandle keys = c->Vector(c->Dim(values, 0)); c->set_output(0, keys); c->set_output(1, values); return absl::OkStatus(); }); REGISTER_OP("LookupTableExportV2") .Input("table_handle: resource") .Output("keys: Tkeys") .Output("values: Tvalues") .Attr("Tkeys: type") .Attr("Tvalues: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); auto* handle_data = c->input_handle_shapes_and_types(0); if (handle_data != nullptr && handle_data->size() == 2) { const ShapeAndType& key_shape_and_type = (*handle_data)[0]; const ShapeAndType& value_shape_and_type = (*handle_data)[1]; TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type, "Tkeys", value_shape_and_type, "Tvalues")); } c->set_output(0, c->UnknownShape()); c->set_output(1, c->UnknownShape()); return absl::OkStatus(); }); REGISTER_OP("LookupTableImport") .Input("table_handle: Ref(string)") .Input("keys: Tin") .Input("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); return absl::OkStatus(); }); REGISTER_OP("LookupTableImportV2") .Input("table_handle: resource") .Input("keys: Tin") .Input("values: Tout") .Attr("Tin: type") .Attr("Tout: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); ShapeHandle keys; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys)); ShapeHandle values; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &values)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->Merge(c->Dim(keys, 0), c->Dim(values, 0), &unused)); return absl::OkStatus(); }); Status MutableHashTableShape(InferenceContext* c, const ShapeHandle& key, const ShapeHandle& value) { c->set_output(0, c->Scalar()); ShapeHandle key_s; TF_RETURN_IF_ERROR(c->WithRankAtMost(key, 1, &key_s)); DataType key_t; TF_RETURN_IF_ERROR(c->GetAttr("key_dtype", &key_t)); DataType value_t; TF_RETURN_IF_ERROR(c->GetAttr("value_dtype", &value_t)); c->set_output_handle_shapes_and_types( 0, std::vector<ShapeAndType>{{key_s, key_t}, {value, value_t}}); return absl::OkStatus(); } Status MutableHashTableShapeFn(InferenceContext* c) { return MutableHashTableShape(c, c->Scalar(), c->Scalar()); } Status MutableHashTableOfTensorsShapeFn(InferenceContext* c) { PartialTensorShape value_p; TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_p)); ShapeHandle value_s; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(value_p, &value_s)); return MutableHashTableShape(c, c->Scalar(), value_s); } Status MutableDenseHashTableShapeFn(InferenceContext* c) { PartialTensorShape value_p; 
TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_p)); ShapeHandle value_s; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(value_p, &value_s)); return MutableHashTableShape(c, c->input(0), value_s); } REGISTER_OP("HashTable") .Output("table_handle: Ref(string)") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(TwoElementOutput); REGISTER_OP("HashTableV2") .Output("table_handle: resource") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(ScalarOutput); REGISTER_OP("AnonymousHashTable") .Output("table_handle: resource") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(ScalarOutput); REGISTER_OP("MutableHashTable") .Output("table_handle: Ref(string)") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(TwoElementOutput); REGISTER_OP("MutableHashTableV2") .Output("table_handle: resource") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(MutableHashTableShapeFn); REGISTER_OP("AnonymousMutableHashTable") .Output("table_handle: resource") .Attr("key_dtype: type") .Attr("value_dtype: type") .SetIsStateful() .SetShapeFn(MutableHashTableShapeFn); REGISTER_OP("MutableHashTableOfTensors") .Output("table_handle: Ref(string)") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .SetIsStateful() .SetShapeFn(TwoElementOutput); REGISTER_OP("MutableHashTableOfTensorsV2") .Output("table_handle: resource") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .SetIsStateful() .SetShapeFn(MutableHashTableOfTensorsShapeFn); REGISTER_OP("AnonymousMutableHashTableOfTensors") .Output("table_handle: resource") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .SetIsStateful() .SetShapeFn(MutableHashTableOfTensorsShapeFn); REGISTER_OP("MutableDenseHashTable") .Input("empty_key: key_dtype") .Output("table_handle: Ref(string)") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .Attr("initial_num_buckets: int = 131072") .Attr("max_load_factor: float = 0.8") .SetIsStateful() .SetShapeFn(TwoElementOutput); REGISTER_OP("MutableDenseHashTableV2") .Input("empty_key: key_dtype") .Input("deleted_key: key_dtype") .Output("table_handle: resource") .Attr("container: string = ''") .Attr("shared_name: string = ''") .Attr("use_node_name_sharing: bool = false") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .Attr("initial_num_buckets: int = 131072") .Attr("max_load_factor: float = 0.8") .SetIsStateful() .SetShapeFn(MutableDenseHashTableShapeFn); REGISTER_OP("AnonymousMutableDenseHashTable") .Input("empty_key: key_dtype") 
.Input("deleted_key: key_dtype") .Output("table_handle: resource") .Attr("key_dtype: type") .Attr("value_dtype: type") .Attr("value_shape: shape = {}") .Attr("initial_num_buckets: int = 131072") .Attr("max_load_factor: float = 0.8") .SetIsStateful() .SetShapeFn(MutableDenseHashTableShapeFn); REGISTER_OP("InitializeTable") .Input("table_handle: Ref(string)") .Input("keys: Tkey") .Input("values: Tval") .Attr("Tkey: type") .Attr("Tval: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); ShapeHandle keys; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys)); TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys)); return absl::OkStatus(); }); REGISTER_OP("InitializeTableV2") .Input("table_handle: resource") .Input("keys: Tkey") .Input("values: Tval") .Attr("Tkey: type") .Attr("Tval: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); ShapeHandle keys; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys)); TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys)); return absl::OkStatus(); }); REGISTER_OP("InitializeTableFromTextFile") .Input("table_handle: Ref(string)") .Input("filename: string") .Attr("key_index: int >= -2") .Attr("value_index: int >= -2") .Attr("vocab_size: int >= -1 = -1") .Attr("delimiter: string = '\t'") .Attr("offset: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle)); return absl::OkStatus(); }); REGISTER_OP("InitializeTableFromTextFileV2") .Input("table_handle: resource") .Input("filename: string") .Attr("key_index: int >= -2") .Attr("value_index: int >= -2") .Attr("vocab_size: int >= -1 = -1") .Attr("delimiter: string = '\t'") .Attr("offset: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle)); return absl::OkStatus(); }); }
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { TEST(LookupOpsTest, LookupTableFindV2_ShapeFn) { ShapeInferenceTestOp op("LookupTableFindV2"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?"); TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableFindV2") .Input({"table_handle", 0, DT_RESOURCE}) .Input({"keys", 0, DT_INT64}) .Input({"default_value", 0, DT_FLOAT}) .Attr("Tin", DT_INT64) .Attr("Tout", DT_FLOAT) .Finalize(&op.node_def)); std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types; auto set_types = [&op, &types](DataType key_type, DataType value_type) { types.emplace_back(); auto& table = types.back(); table.emplace_back("[3]", key_type); table.emplace_back("[4]", value_type); op.input_resource_handle_shapes_and_types = {&table, nullptr, nullptr}; }; INFER_OK(op, "[];[?,3];[4]", "?"); set_types(DT_INT32, DT_FLOAT); INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]"); set_types(DT_INT64, DT_INT64); INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]"); set_types(DT_INT64, DT_FLOAT); INFER_OK(op, "[];[?,3];[4]", "[d1_0,4]"); INFER_OK(op, "[];[1,3];[4]", "[d1_0,4]"); INFER_OK(op, "[];[1,?];[4]", "[d1_0,4]"); } TEST(LookupOpsTest, LookupTableExportV2_ShapeFn) { ShapeInferenceTestOp op("LookupTableExportV2"); TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableExportV2") .Input({"table_handle", 0, DT_RESOURCE}) .Attr("Tkeys", DT_INT64) .Attr("Tvalues", DT_FLOAT) .Finalize(&op.node_def)); std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types; auto set_types = [&op, &types](DataType key_type, DataType value_type) { types.emplace_back(); auto& table = types.back(); table.emplace_back("[3]", key_type); table.emplace_back("[4]", value_type); op.input_resource_handle_shapes_and_types = {&table}; }; set_types(DT_INT32, DT_FLOAT); INFER_ERROR("read value with wrong dtype", op, "[]"); set_types(DT_INT64, DT_INT64); INFER_ERROR("read value with wrong dtype", op, "[]"); set_types(DT_INT64, DT_FLOAT); INFER_OK(op, "[]", "?;?"); } } }
Status ScalarAndTwoElementVectorInputsAndScalarOutputs(InferenceContext* c) { ShapeHandle handle; DimensionHandle unused_handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle)); for (int i = 1; i < c->num_inputs(); ++i) { TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle)); TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle)); } for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->Scalar()); } return absl::OkStatus(); }
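// Reader's note: the rank-1 / dimension-2 checks in these helpers encode
// the TF1 convention that a Ref(string) table handle is a 2-element string
// vector (container and table name), while the resource-based V2 ops carry
// a scalar handle -- hence the leading WithRank(c->input(0), 0, ...) here.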
#include "quiche/quic/core/http/http_encoder.h" #include <algorithm> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "quiche/quic/core/crypto/quic_random.h" #include "quiche/quic/core/quic_data_writer.h" #include "quiche/quic/core/quic_types.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" namespace quic { namespace { bool WriteFrameHeader(QuicByteCount length, HttpFrameType type, QuicDataWriter* writer) { return writer->WriteVarInt62(static_cast<uint64_t>(type)) && writer->WriteVarInt62(length); } QuicByteCount GetTotalLength(QuicByteCount payload_length, HttpFrameType type) { return QuicDataWriter::GetVarInt62Len(payload_length) + QuicDataWriter::GetVarInt62Len(static_cast<uint64_t>(type)) + payload_length; } } QuicByteCount HttpEncoder::GetDataFrameHeaderLength( QuicByteCount payload_length) { QUICHE_DCHECK_NE(0u, payload_length); return QuicDataWriter::GetVarInt62Len(payload_length) + QuicDataWriter::GetVarInt62Len( static_cast<uint64_t>(HttpFrameType::DATA)); } quiche::QuicheBuffer HttpEncoder::SerializeDataFrameHeader( QuicByteCount payload_length, quiche::QuicheBufferAllocator* allocator) { QUICHE_DCHECK_NE(0u, payload_length); QuicByteCount header_length = GetDataFrameHeaderLength(payload_length); quiche::QuicheBuffer header(allocator, header_length); QuicDataWriter writer(header.size(), header.data()); if (WriteFrameHeader(payload_length, HttpFrameType::DATA, &writer)) { return header; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize data frame header."; return quiche::QuicheBuffer(); } std::string HttpEncoder::SerializeHeadersFrameHeader( QuicByteCount payload_length) { QUICHE_DCHECK_NE(0u, payload_length); QuicByteCount header_length = QuicDataWriter::GetVarInt62Len(payload_length) + QuicDataWriter::GetVarInt62Len( static_cast<uint64_t>(HttpFrameType::HEADERS)); std::string frame; frame.resize(header_length); QuicDataWriter writer(header_length, frame.data()); if (WriteFrameHeader(payload_length, HttpFrameType::HEADERS, &writer)) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize headers " "frame header."; return {}; } std::string HttpEncoder::SerializeSettingsFrame(const SettingsFrame& settings) { QuicByteCount payload_length = 0; std::vector<std::pair<uint64_t, uint64_t>> ordered_settings{ settings.values.begin(), settings.values.end()}; std::sort(ordered_settings.begin(), ordered_settings.end()); for (const auto& p : ordered_settings) { payload_length += QuicDataWriter::GetVarInt62Len(p.first); payload_length += QuicDataWriter::GetVarInt62Len(p.second); } QuicByteCount total_length = GetTotalLength(payload_length, HttpFrameType::SETTINGS); std::string frame; frame.resize(total_length); QuicDataWriter writer(total_length, frame.data()); if (!WriteFrameHeader(payload_length, HttpFrameType::SETTINGS, &writer)) { QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize " "settings frame header."; return {}; } for (const auto& p : ordered_settings) { if (!writer.WriteVarInt62(p.first) || !writer.WriteVarInt62(p.second)) { QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize " "settings frame payload."; return {}; } } return frame; } std::string HttpEncoder::SerializeGoAwayFrame(const GoAwayFrame& goaway) { QuicByteCount payload_length = QuicDataWriter::GetVarInt62Len(goaway.id); 
QuicByteCount total_length = GetTotalLength(payload_length, HttpFrameType::GOAWAY); std::string frame; frame.resize(total_length); QuicDataWriter writer(total_length, frame.data()); if (WriteFrameHeader(payload_length, HttpFrameType::GOAWAY, &writer) && writer.WriteVarInt62(goaway.id)) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize goaway frame."; return {}; } std::string HttpEncoder::SerializePriorityUpdateFrame( const PriorityUpdateFrame& priority_update) { QuicByteCount payload_length = QuicDataWriter::GetVarInt62Len(priority_update.prioritized_element_id) + priority_update.priority_field_value.size(); QuicByteCount total_length = GetTotalLength( payload_length, HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM); std::string frame; frame.resize(total_length); QuicDataWriter writer(total_length, frame.data()); if (WriteFrameHeader(payload_length, HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM, &writer) && writer.WriteVarInt62(priority_update.prioritized_element_id) && writer.WriteBytes(priority_update.priority_field_value.data(), priority_update.priority_field_value.size())) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize " "PRIORITY_UPDATE frame."; return {}; } std::string HttpEncoder::SerializeAcceptChFrame( const AcceptChFrame& accept_ch) { QuicByteCount payload_length = 0; for (const auto& entry : accept_ch.entries) { payload_length += QuicDataWriter::GetVarInt62Len(entry.origin.size()); payload_length += entry.origin.size(); payload_length += QuicDataWriter::GetVarInt62Len(entry.value.size()); payload_length += entry.value.size(); } QuicByteCount total_length = GetTotalLength(payload_length, HttpFrameType::ACCEPT_CH); std::string frame; frame.resize(total_length); QuicDataWriter writer(total_length, frame.data()); if (!WriteFrameHeader(payload_length, HttpFrameType::ACCEPT_CH, &writer)) { QUIC_DLOG(ERROR) << "Http encoder failed to serialize ACCEPT_CH frame header."; return {}; } for (const auto& entry : accept_ch.entries) { if (!writer.WriteStringPieceVarInt62(entry.origin) || !writer.WriteStringPieceVarInt62(entry.value)) { QUIC_DLOG(ERROR) << "Http encoder failed to serialize ACCEPT_CH frame payload."; return {}; } } return frame; } std::string HttpEncoder::SerializeGreasingFrame() { uint64_t frame_type; QuicByteCount payload_length; std::string payload; if (!GetQuicFlag(quic_enable_http3_grease_randomness)) { frame_type = 0x40; payload_length = 1; payload = "a"; } else { uint32_t result; QuicRandom::GetInstance()->RandBytes(&result, sizeof(result)); frame_type = 0x1fULL * static_cast<uint64_t>(result) + 0x21ULL; payload_length = result % 4; if (payload_length > 0) { payload.resize(payload_length); QuicRandom::GetInstance()->RandBytes(payload.data(), payload_length); } } QuicByteCount total_length = QuicDataWriter::GetVarInt62Len(frame_type) + QuicDataWriter::GetVarInt62Len(payload_length) + payload_length; std::string frame; frame.resize(total_length); QuicDataWriter writer(total_length, frame.data()); bool success = writer.WriteVarInt62(frame_type) && writer.WriteVarInt62(payload_length); if (payload_length > 0) { success &= writer.WriteBytes(payload.data(), payload_length); } if (success) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize " "greasing frame."; return {}; } std::string HttpEncoder::SerializeWebTransportStreamFrameHeader( WebTransportSessionId session_id) { uint64_t stream_type = static_cast<uint64_t>(HttpFrameType::WEBTRANSPORT_STREAM); QuicByteCount 
header_length = QuicDataWriter::GetVarInt62Len(stream_type) + QuicDataWriter::GetVarInt62Len(session_id); std::string frame; frame.resize(header_length); QuicDataWriter writer(header_length, frame.data()); bool success = writer.WriteVarInt62(stream_type) && writer.WriteVarInt62(session_id); if (success && writer.remaining() == 0) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize " "WEBTRANSPORT_STREAM frame header."; return {}; } std::string HttpEncoder::SerializeMetadataFrameHeader( QuicByteCount payload_length) { QUICHE_DCHECK_NE(0u, payload_length); QuicByteCount header_length = QuicDataWriter::GetVarInt62Len(payload_length) + QuicDataWriter::GetVarInt62Len( static_cast<uint64_t>(HttpFrameType::METADATA)); std::string frame; frame.resize(header_length); QuicDataWriter writer(header_length, frame.data()); if (WriteFrameHeader(payload_length, HttpFrameType::METADATA, &writer)) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize METADATA " "frame header."; return {}; } }
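// Illustrative helper (a reader's sketch, not QUICHE API): every frame
// header above is two QUIC variable-length integers (RFC 9000 varint62) --
// type, then payload length. Values below 64 fit in one byte, which is why
// a DATA frame (type 0x00) with a 5-byte payload serializes its header as
// {0x00, 0x05} in the test that follows.
#include <cstddef>
#include <cstdint>

size_t VarInt62Len(uint64_t value) {
  // Two prefix bits select the width: 00 -> 1 byte (6-bit value),
  // 01 -> 2 bytes (14 bits), 10 -> 4 bytes (30 bits), 11 -> 8 bytes.
  if (value < (1ull << 6)) return 1;
  if (value < (1ull << 14)) return 2;
  if (value < (1ull << 30)) return 4;
  return 8;
}

size_t FrameHeaderLen(uint64_t type, uint64_t payload_length) {
  return VarInt62Len(type) + VarInt62Len(payload_length);
}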
#include "quiche/quic/core/http/http_encoder.h" #include <string> #include "absl/base/macros.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/common/simple_buffer_allocator.h" #include "quiche/common/test_tools/quiche_test_utils.h" namespace quic { namespace test { TEST(HttpEncoderTest, SerializeDataFrameHeader) { quiche::QuicheBuffer buffer = HttpEncoder::SerializeDataFrameHeader( 5, quiche::SimpleBufferAllocator::Get()); char output[] = {0x00, 0x05}; EXPECT_EQ(ABSL_ARRAYSIZE(output), buffer.size()); quiche::test::CompareCharArraysWithHexError( "DATA", buffer.data(), buffer.size(), output, ABSL_ARRAYSIZE(output)); } TEST(HttpEncoderTest, SerializeHeadersFrameHeader) { std::string header = HttpEncoder::SerializeHeadersFrameHeader( 7); char output[] = {0x01, 0x07}; quiche::test::CompareCharArraysWithHexError("HEADERS", header.data(), header.length(), output, ABSL_ARRAYSIZE(output)); } TEST(HttpEncoderTest, SerializeSettingsFrame) { SettingsFrame settings; settings.values[1] = 2; settings.values[6] = 5; settings.values[256] = 4; char output[] = {0x04, 0x07, 0x01, 0x02, 0x06, 0x05, 0x41, 0x00, 0x04}; std::string frame = HttpEncoder::SerializeSettingsFrame(settings); quiche::test::CompareCharArraysWithHexError( "SETTINGS", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output)); } TEST(HttpEncoderTest, SerializeGoAwayFrame) { GoAwayFrame goaway; goaway.id = 0x1; char output[] = {0x07, 0x1, 0x01}; std::string frame = HttpEncoder::SerializeGoAwayFrame(goaway); quiche::test::CompareCharArraysWithHexError( "GOAWAY", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output)); } TEST(HttpEncoderTest, SerializePriorityUpdateFrame) { PriorityUpdateFrame priority_update1; priority_update1.prioritized_element_id = 0x03; uint8_t output1[] = {0x80, 0x0f, 0x07, 0x00, 0x01, 0x03}; std::string frame1 = HttpEncoder::SerializePriorityUpdateFrame(priority_update1); quiche::test::CompareCharArraysWithHexError( "PRIORITY_UPDATE", frame1.data(), frame1.length(), reinterpret_cast<char*>(output1), ABSL_ARRAYSIZE(output1)); PriorityUpdateFrame priority_update2; priority_update2.prioritized_element_id = 0x05; priority_update2.priority_field_value = "foo"; uint8_t output2[] = {0x80, 0x0f, 0x07, 0x00, 0x04, 0x05, 0x66, 0x6f, 0x6f}; std::string frame2 = HttpEncoder::SerializePriorityUpdateFrame(priority_update2); quiche::test::CompareCharArraysWithHexError( "PRIORITY_UPDATE", frame2.data(), frame2.length(), reinterpret_cast<char*>(output2), ABSL_ARRAYSIZE(output2)); } TEST(HttpEncoderTest, SerializeAcceptChFrame) { AcceptChFrame accept_ch; uint8_t output1[] = {0x40, 0x89, 0x00}; std::string frame1 = HttpEncoder::SerializeAcceptChFrame(accept_ch); quiche::test::CompareCharArraysWithHexError( "ACCEPT_CH", frame1.data(), frame1.length(), reinterpret_cast<char*>(output1), ABSL_ARRAYSIZE(output1)); accept_ch.entries.push_back({"foo", "bar"}); uint8_t output2[] = {0x40, 0x89, 0x08, 0x03, 0x66, 0x6f, 0x6f, 0x03, 0x62, 0x61, 0x72}; std::string frame2 = HttpEncoder::SerializeAcceptChFrame(accept_ch); quiche::test::CompareCharArraysWithHexError( "ACCEPT_CH", frame2.data(), frame2.length(), reinterpret_cast<char*>(output2), ABSL_ARRAYSIZE(output2)); } TEST(HttpEncoderTest, SerializeWebTransportStreamFrameHeader) { WebTransportSessionId session_id = 0x17; char output[] = {0x40, 0x41, 0x17}; std::string frame = HttpEncoder::SerializeWebTransportStreamFrameHeader(session_id); 
quiche::test::CompareCharArraysWithHexError("WEBTRANSPORT_STREAM", frame.data(), frame.length(), output, sizeof(output)); } TEST(HttpEncoderTest, SerializeMetadataFrameHeader) { std::string frame = HttpEncoder::SerializeMetadataFrameHeader( 7); char output[] = {0x40, 0x4d, 0x07}; quiche::test::CompareCharArraysWithHexError( "METADATA", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output)); } } }
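// Reader's note: SerializeGreasingFrame() draws its type from
// 0x1f * N + 0x21, the family HTTP/3 (RFC 9114) reserves for GREASE frames
// that peers must ignore. With quic_enable_http3_grease_randomness off the
// encoder pins N = 1 (type 0x40) with the one-byte payload "a", keeping the
// wire image deterministic for tests.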
std::string HttpEncoder::SerializeHeadersFrameHeader( QuicByteCount payload_length) { QUICHE_DCHECK_NE(0u, payload_length); QuicByteCount header_length = QuicDataWriter::GetVarInt62Len(payload_length) + QuicDataWriter::GetVarInt62Len( static_cast<uint64_t>(HttpFrameType::HEADERS)); std::string frame; frame.resize(header_length); QuicDataWriter writer(header_length, frame.data()); if (WriteFrameHeader(payload_length, HttpFrameType::HEADERS, &writer)) { return frame; } QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize headers " "frame header."; return {}; }
TEST(HttpEncoderTest, SerializeHeadersFrameHeader) { std::string header = HttpEncoder::SerializeHeadersFrameHeader( 7); char output[] = {0x01, 0x07}; quiche::test::CompareCharArraysWithHexError("HEADERS", header.data(), header.length(), output, ABSL_ARRAYSIZE(output)); }
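// Reader's note: 0x01 is the HEADERS type and 0x07 the payload length, each
// a one-byte varint62. Values of 64 or more widen the varint -- see the
// SETTINGS test earlier, where the key 256 serializes as the two bytes
// {0x41, 0x00} (prefix 01 selects the two-byte form; the low 14 bits hold
// 0x100).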
#include "absl/flags/parse.h" #include <stdlib.h> #include <algorithm> #include <cstdint> #include <cstdlib> #include <fstream> #include <iostream> #include <ostream> #include <string> #include <tuple> #include <utility> #include <vector> #ifdef _WIN32 #include <windows.h> #endif #include "absl/algorithm/container.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/flags/commandlineflag.h" #include "absl/flags/config.h" #include "absl/flags/flag.h" #include "absl/flags/internal/commandlineflag.h" #include "absl/flags/internal/flag.h" #include "absl/flags/internal/parse.h" #include "absl/flags/internal/private_handle_accessor.h" #include "absl/flags/internal/program_name.h" #include "absl/flags/internal/usage.h" #include "absl/flags/reflection.h" #include "absl/flags/usage.h" #include "absl/flags/usage_config.h" #include "absl/strings/ascii.h" #include "absl/strings/internal/damerau_levenshtein_distance.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/strings/strip.h" #include "absl/synchronization/mutex.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { namespace { ABSL_CONST_INIT absl::Mutex processing_checks_guard(absl::kConstInit); ABSL_CONST_INIT bool flagfile_needs_processing ABSL_GUARDED_BY(processing_checks_guard) = false; ABSL_CONST_INIT bool fromenv_needs_processing ABSL_GUARDED_BY(processing_checks_guard) = false; ABSL_CONST_INIT bool tryfromenv_needs_processing ABSL_GUARDED_BY(processing_checks_guard) = false; ABSL_CONST_INIT absl::Mutex specified_flags_guard(absl::kConstInit); ABSL_CONST_INIT std::vector<const CommandLineFlag*>* specified_flags ABSL_GUARDED_BY(specified_flags_guard) = nullptr; ABSL_CONST_INIT const size_t kMaxHints = 100; ABSL_CONST_INIT const size_t kMaxDistance = 3; struct SpecifiedFlagsCompare { bool operator()(const CommandLineFlag* a, const CommandLineFlag* b) const { return a->Name() < b->Name(); } bool operator()(const CommandLineFlag* a, absl::string_view b) const { return a->Name() < b; } bool operator()(absl::string_view a, const CommandLineFlag* b) const { return a < b->Name(); } }; } } ABSL_NAMESPACE_END } ABSL_FLAG(std::vector<std::string>, flagfile, {}, "comma-separated list of files to load flags from") .OnUpdate([]() { if (absl::GetFlag(FLAGS_flagfile).empty()) return; absl::MutexLock l(&absl::flags_internal::processing_checks_guard); if (absl::flags_internal::flagfile_needs_processing) { ABSL_INTERNAL_LOG(WARNING, "flagfile set twice before it is handled"); } absl::flags_internal::flagfile_needs_processing = true; }); ABSL_FLAG(std::vector<std::string>, fromenv, {}, "comma-separated list of flags to set from the environment" " [use 'export FLAGS_flag1=value']") .OnUpdate([]() { if (absl::GetFlag(FLAGS_fromenv).empty()) return; absl::MutexLock l(&absl::flags_internal::processing_checks_guard); if (absl::flags_internal::fromenv_needs_processing) { ABSL_INTERNAL_LOG(WARNING, "fromenv set twice before it is handled."); } absl::flags_internal::fromenv_needs_processing = true; }); ABSL_FLAG(std::vector<std::string>, tryfromenv, {}, "comma-separated list of flags to try to set from the environment if " "present") .OnUpdate([]() { if (absl::GetFlag(FLAGS_tryfromenv).empty()) return; absl::MutexLock l(&absl::flags_internal::processing_checks_guard); if (absl::flags_internal::tryfromenv_needs_processing) { ABSL_INTERNAL_LOG(WARNING, "tryfromenv set twice 
before it is handled."); } absl::flags_internal::tryfromenv_needs_processing = true; }); ABSL_FLAG(std::vector<std::string>, undefok, {}, "comma-separated list of flag names that it is okay to specify " "on the command line even if the program does not define a flag " "with that name"); namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { namespace { class ArgsList { public: ArgsList() : next_arg_(0) {} ArgsList(int argc, char* argv[]) : args_(argv, argv + argc), next_arg_(0) {} explicit ArgsList(const std::vector<std::string>& args) : args_(args), next_arg_(0) {} bool ReadFromFlagfile(const std::string& flag_file_name); size_t Size() const { return args_.size() - next_arg_; } size_t FrontIndex() const { return next_arg_; } absl::string_view Front() const { return args_[next_arg_]; } void PopFront() { next_arg_++; } private: std::vector<std::string> args_; size_t next_arg_; }; bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) { std::ifstream flag_file(flag_file_name); if (!flag_file) { flags_internal::ReportUsageError( absl::StrCat("Can't open flagfile ", flag_file_name), true); return false; } args_.emplace_back(""); std::string line; bool success = true; while (std::getline(flag_file, line)) { absl::string_view stripped = absl::StripLeadingAsciiWhitespace(line); if (stripped.empty() || stripped[0] == '#') { continue; } if (stripped[0] == '-') { if (stripped == "--") { flags_internal::ReportUsageError( "Flagfile can't contain position arguments or --", true); success = false; break; } args_.emplace_back(stripped); continue; } flags_internal::ReportUsageError( absl::StrCat("Unexpected line in the flagfile ", flag_file_name, ": ", line), true); success = false; } return success; } bool GetEnvVar(const char* var_name, std::string& var_value) { #ifdef _WIN32 char buf[1024]; auto get_res = GetEnvironmentVariableA(var_name, buf, sizeof(buf)); if (get_res >= sizeof(buf)) { return false; } if (get_res == 0) { return false; } var_value = std::string(buf, get_res); #else const char* val = ::getenv(var_name); if (val == nullptr) { return false; } var_value = val; #endif return true; } std::tuple<absl::string_view, absl::string_view, bool> SplitNameAndValue( absl::string_view arg) { absl::ConsumePrefix(&arg, "-"); if (arg.empty()) { return std::make_tuple("", "", false); } auto equal_sign_pos = arg.find('='); absl::string_view flag_name = arg.substr(0, equal_sign_pos); absl::string_view value; bool is_empty_value = false; if (equal_sign_pos != absl::string_view::npos) { value = arg.substr(equal_sign_pos + 1); is_empty_value = value.empty(); } return std::make_tuple(flag_name, value, is_empty_value); } std::tuple<CommandLineFlag*, bool> LocateFlag(absl::string_view flag_name) { CommandLineFlag* flag = absl::FindCommandLineFlag(flag_name); bool is_negative = false; if (!flag && absl::ConsumePrefix(&flag_name, "no")) { flag = absl::FindCommandLineFlag(flag_name); is_negative = true; } return std::make_tuple(flag, is_negative); } void CheckDefaultValuesParsingRoundtrip() { #ifndef NDEBUG flags_internal::ForEachFlag([&](CommandLineFlag& flag) { if (flag.IsRetired()) return; #define ABSL_FLAGS_INTERNAL_IGNORE_TYPE(T, _) \ if (flag.IsOfType<T>()) return; ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(ABSL_FLAGS_INTERNAL_IGNORE_TYPE) #undef ABSL_FLAGS_INTERNAL_IGNORE_TYPE flags_internal::PrivateHandleAccessor::CheckDefaultValueParsingRoundtrip( flag); }); #endif } bool ReadFlagfiles(const std::vector<std::string>& flagfiles, std::vector<ArgsList>& input_args) { bool success = true; for 
(auto it = flagfiles.rbegin(); it != flagfiles.rend(); ++it) { ArgsList al; if (al.ReadFromFlagfile(*it)) { input_args.push_back(al); } else { success = false; } } return success; } bool ReadFlagsFromEnv(const std::vector<std::string>& flag_names, std::vector<ArgsList>& input_args, bool fail_on_absent_in_env) { bool success = true; std::vector<std::string> args; args.emplace_back(""); for (const auto& flag_name : flag_names) { if (flag_name == "fromenv" || flag_name == "tryfromenv") { flags_internal::ReportUsageError( absl::StrCat("Infinite recursion on flag ", flag_name), true); success = false; continue; } const std::string envname = absl::StrCat("FLAGS_", flag_name); std::string envval; if (!GetEnvVar(envname.c_str(), envval)) { if (fail_on_absent_in_env) { flags_internal::ReportUsageError( absl::StrCat(envname, " not found in environment"), true); success = false; } continue; } args.push_back(absl::StrCat("--", flag_name, "=", envval)); } if (success) { input_args.emplace_back(args); } return success; } bool HandleGeneratorFlags(std::vector<ArgsList>& input_args, std::vector<std::string>& flagfile_value) { bool success = true; absl::MutexLock l(&flags_internal::processing_checks_guard); if (flags_internal::flagfile_needs_processing) { auto flagfiles = absl::GetFlag(FLAGS_flagfile); if (input_args.size() == 1) { flagfile_value.insert(flagfile_value.end(), flagfiles.begin(), flagfiles.end()); } success &= ReadFlagfiles(flagfiles, input_args); flags_internal::flagfile_needs_processing = false; } if (flags_internal::fromenv_needs_processing) { auto flags_list = absl::GetFlag(FLAGS_fromenv); success &= ReadFlagsFromEnv(flags_list, input_args, true); flags_internal::fromenv_needs_processing = false; } if (flags_internal::tryfromenv_needs_processing) { auto flags_list = absl::GetFlag(FLAGS_tryfromenv); success &= ReadFlagsFromEnv(flags_list, input_args, false); flags_internal::tryfromenv_needs_processing = false; } return success; } void ResetGeneratorFlags(const std::vector<std::string>& flagfile_value) { if (!flagfile_value.empty()) { absl::SetFlag(&FLAGS_flagfile, flagfile_value); absl::MutexLock l(&flags_internal::processing_checks_guard); flags_internal::flagfile_needs_processing = false; } if (!absl::GetFlag(FLAGS_fromenv).empty()) { absl::SetFlag(&FLAGS_fromenv, {}); } if (!absl::GetFlag(FLAGS_tryfromenv).empty()) { absl::SetFlag(&FLAGS_tryfromenv, {}); } absl::MutexLock l(&flags_internal::processing_checks_guard); flags_internal::fromenv_needs_processing = false; flags_internal::tryfromenv_needs_processing = false; } std::tuple<bool, absl::string_view> DeduceFlagValue(const CommandLineFlag& flag, absl::string_view value, bool is_negative, bool is_empty_value, ArgsList* curr_list) { if (flag.IsOfType<bool>()) { if (value.empty()) { if (is_empty_value) { flags_internal::ReportUsageError( absl::StrCat( "Missing the value after assignment for the boolean flag '", flag.Name(), "'"), true); return std::make_tuple(false, ""); } value = is_negative ? 
"0" : "1"; } else if (is_negative) { flags_internal::ReportUsageError( absl::StrCat("Negative form with assignment is not valid for the " "boolean flag '", flag.Name(), "'"), true); return std::make_tuple(false, ""); } } else if (is_negative) { flags_internal::ReportUsageError( absl::StrCat("Negative form is not valid for the flag '", flag.Name(), "'"), true); return std::make_tuple(false, ""); } else if (value.empty() && (!is_empty_value)) { if (curr_list->Size() == 1) { flags_internal::ReportUsageError( absl::StrCat("Missing the value for the flag '", flag.Name(), "'"), true); return std::make_tuple(false, ""); } curr_list->PopFront(); value = curr_list->Front(); if (!value.empty() && value[0] == '-' && flag.IsOfType<std::string>()) { auto maybe_flag_name = std::get<0>(SplitNameAndValue(value.substr(1))); if (maybe_flag_name.empty() || std::get<0>(LocateFlag(maybe_flag_name)) != nullptr) { ABSL_INTERNAL_LOG( WARNING, absl::StrCat("Did you really mean to set flag '", flag.Name(), "' to the value '", value, "'?")); } } } return std::make_tuple(true, value); } bool CanIgnoreUndefinedFlag(absl::string_view flag_name) { auto undefok = absl::GetFlag(FLAGS_undefok); if (std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end()) { return true; } if (absl::ConsumePrefix(&flag_name, "no") && std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end()) { return true; } return false; } void ReportUnrecognizedFlags( const std::vector<UnrecognizedFlag>& unrecognized_flags, bool report_as_fatal_error) { for (const auto& unrecognized : unrecognized_flags) { std::vector<std::string> misspelling_hints; if (unrecognized.source == UnrecognizedFlag::kFromArgv) { misspelling_hints = flags_internal::GetMisspellingHints(unrecognized.flag_name); } if (misspelling_hints.empty()) { flags_internal::ReportUsageError( absl::StrCat("Unknown command line flag '", unrecognized.flag_name, "'"), report_as_fatal_error); } else { flags_internal::ReportUsageError( absl::StrCat("Unknown command line flag '", unrecognized.flag_name, "'. 
Did you mean: ", absl::StrJoin(misspelling_hints, ", "), " ?"), report_as_fatal_error); } } } } bool WasPresentOnCommandLine(absl::string_view flag_name) { absl::ReaderMutexLock l(&specified_flags_guard); ABSL_INTERNAL_CHECK(specified_flags != nullptr, "ParseCommandLine is not invoked yet"); return std::binary_search(specified_flags->begin(), specified_flags->end(), flag_name, SpecifiedFlagsCompare{}); } struct BestHints { explicit BestHints(uint8_t _max) : best_distance(_max + 1) {} bool AddHint(absl::string_view hint, uint8_t distance) { if (hints.size() >= kMaxHints) return false; if (distance == best_distance) { hints.emplace_back(hint); } if (distance < best_distance) { best_distance = distance; hints = std::vector<std::string>{std::string(hint)}; } return true; } uint8_t best_distance; std::vector<std::string> hints; }; std::vector<std::string> GetMisspellingHints(const absl::string_view flag) { const size_t maxCutoff = std::min(flag.size() / 2 + 1, kMaxDistance); auto undefok = absl::GetFlag(FLAGS_undefok); BestHints best_hints(static_cast<uint8_t>(maxCutoff)); flags_internal::ForEachFlag([&](const CommandLineFlag& f) { if (best_hints.hints.size() >= kMaxHints) return; uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance( flag, f.Name(), best_hints.best_distance); best_hints.AddHint(f.Name(), distance); if (f.IsOfType<bool>()) { const std::string negated_flag = absl::StrCat("no", f.Name()); distance = strings_internal::CappedDamerauLevenshteinDistance( flag, negated_flag, best_hints.best_distance); best_hints.AddHint(negated_flag, distance); } }); absl::c_for_each(undefok, [&](const absl::string_view f) { if (best_hints.hints.size() >= kMaxHints) return; uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance( flag, f, best_hints.best_distance); best_hints.AddHint(absl::StrCat(f, " (undefok)"), distance); }); return best_hints.hints; } std::vector<char*> ParseCommandLineImpl(int argc, char* argv[], UsageFlagsAction usage_flag_action, OnUndefinedFlag undef_flag_action, std::ostream& error_help_output) { std::vector<char*> positional_args; std::vector<UnrecognizedFlag> unrecognized_flags; auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl( argc, argv, positional_args, unrecognized_flags, usage_flag_action); if (undef_flag_action != OnUndefinedFlag::kIgnoreUndefined) { flags_internal::ReportUnrecognizedFlags( unrecognized_flags, (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined)); if (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined) { if (!unrecognized_flags.empty()) { flags_internal::HandleUsageFlags(error_help_output, ProgramUsageMessage()); std::exit(1); } } } flags_internal::MaybeExit(help_mode); return positional_args; } HelpMode ParseAbseilFlagsOnlyImpl( int argc, char* argv[], std::vector<char*>& positional_args, std::vector<UnrecognizedFlag>& unrecognized_flags, UsageFlagsAction usage_flag_action) { ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]"); using flags_internal::ArgsList; using flags_internal::specified_flags; std::vector<std::string> flagfile_value; std::vector<ArgsList> input_args; flags_internal::FinalizeRegistry(); flags_internal::CheckDefaultValuesParsingRoundtrip(); input_args.push_back(ArgsList(argc, argv)); if (flags_internal::ProgramInvocationName() == "UNKNOWN") { flags_internal::SetProgramInvocationName(argv[0]); } positional_args.push_back(argv[0]); absl::MutexLock l(&flags_internal::specified_flags_guard); if (specified_flags == nullptr) { specified_flags = new std::vector<const CommandLineFlag*>; } else 
{ specified_flags->clear(); } bool success = true; while (!input_args.empty()) { success &= flags_internal::HandleGeneratorFlags(input_args, flagfile_value); ArgsList& curr_list = input_args.back(); curr_list.PopFront(); if (curr_list.Size() == 0) { input_args.pop_back(); continue; } absl::string_view arg(curr_list.Front()); bool arg_from_argv = input_args.size() == 1; if (!absl::ConsumePrefix(&arg, "-") || arg.empty()) { ABSL_INTERNAL_CHECK(arg_from_argv, "Flagfile cannot contain positional argument"); positional_args.push_back(argv[curr_list.FrontIndex()]); continue; } absl::string_view flag_name; absl::string_view value; bool is_empty_value = false; std::tie(flag_name, value, is_empty_value) = flags_internal::SplitNameAndValue(arg); if (flag_name.empty()) { ABSL_INTERNAL_CHECK(arg_from_argv, "Flagfile cannot contain positional argument"); curr_list.PopFront(); break; } CommandLineFlag* flag = nullptr; bool is_negative = false; std::tie(flag, is_negative) = flags_internal::LocateFlag(flag_name); if (flag == nullptr) { if (flags_internal::DeduceUsageFlags(flag_name, value)) { continue; } unrecognized_flags.emplace_back(arg_from_argv ? UnrecognizedFlag::kFromArgv : UnrecognizedFlag::kFromFlagfile, flag_name); continue; } bool value_success = true; std::tie(value_success, value) = flags_internal::DeduceFlagValue( *flag, value, is_negative, is_empty_value, &curr_list); success &= value_success; std::string error; if (!flags_internal::PrivateHandleAccessor::ParseFrom( *flag, value, flags_internal::SET_FLAGS_VALUE, flags_internal::kCommandLine, error)) { if (flag->IsRetired()) continue; flags_internal::ReportUsageError(error, true); success = false; } else { specified_flags->push_back(flag); } } flags_internal::ResetGeneratorFlags(flagfile_value); if (!input_args.empty()) { for (size_t arg_index = input_args.back().FrontIndex(); arg_index < static_cast<size_t>(argc); ++arg_index) { positional_args.push_back(argv[arg_index]); } } specified_flags->shrink_to_fit(); std::sort(specified_flags->begin(), specified_flags->end(), flags_internal::SpecifiedFlagsCompare{}); std::vector<UnrecognizedFlag> filtered; filtered.reserve(unrecognized_flags.size()); for (const auto& unrecognized : unrecognized_flags) { if (flags_internal::CanIgnoreUndefinedFlag(unrecognized.flag_name)) continue; filtered.push_back(unrecognized); } std::swap(unrecognized_flags, filtered); if (!success) { #if ABSL_FLAGS_STRIP_NAMES flags_internal::ReportUsageError( "NOTE: command line flags are disabled in this build", true); #else flags_internal::HandleUsageFlags(std::cerr, ProgramUsageMessage()); #endif return HelpMode::kFull; } return usage_flag_action == UsageFlagsAction::kHandleUsage ? flags_internal::HandleUsageFlags(std::cout, ProgramUsageMessage()) : HelpMode::kNone; } } void ParseAbseilFlagsOnly(int argc, char* argv[], std::vector<char*>& positional_args, std::vector<UnrecognizedFlag>& unrecognized_flags) { auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl( argc, argv, positional_args, unrecognized_flags, flags_internal::UsageFlagsAction::kHandleUsage); flags_internal::MaybeExit(help_mode); } void ReportUnrecognizedFlags( const std::vector<UnrecognizedFlag>& unrecognized_flags) { flags_internal::ReportUnrecognizedFlags(unrecognized_flags, true); } std::vector<char*> ParseCommandLine(int argc, char* argv[]) { return flags_internal::ParseCommandLineImpl( argc, argv, flags_internal::UsageFlagsAction::kHandleUsage, flags_internal::OnUndefinedFlag::kAbortIfUndefined); } ABSL_NAMESPACE_END }
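The file above implements the public entry points absl::ParseCommandLine and absl::ParseAbseilFlagsOnly. A minimal sketch of typical usage follows; the flag name and default are illustrative assumptions, not part of the file above:

#include <vector>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"

ABSL_FLAG(int, port, 8080, "Port to listen on");  // illustrative flag

int main(int argc, char* argv[]) {
  // Consumes recognized flags; returns positional arguments, argv[0] first.
  std::vector<char*> positional = absl::ParseCommandLine(argc, argv);
  const int port = absl::GetFlag(FLAGS_port);
  (void)positional;
  (void)port;
  return 0;
}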
#include "absl/flags/parse.h" #include <stdlib.h> #include <fstream> #include <iostream> #include <string> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/scoped_set_env.h" #include "absl/flags/config.h" #include "absl/flags/flag.h" #include "absl/flags/internal/parse.h" #include "absl/flags/internal/usage.h" #include "absl/flags/reflection.h" #include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "absl/types/span.h" #ifdef _WIN32 #include <windows.h> #endif #define FLAG_MULT(x) F3(x) #define TEST_FLAG_HEADER FLAG_HEADER_ #define F(name) ABSL_FLAG(int, name, 0, "") #define F1(name) \ F(name##1); \ F(name##2); \ F(name##3); \ F(name##4); \ F(name##5) #define F2(name) \ F1(name##1); \ F1(name##2); \ F1(name##3); \ F1(name##4); \ F1(name##5) #define F3(name) \ F2(name##1); \ F2(name##2); \ F2(name##3); \ F2(name##4); \ F2(name##5) FLAG_MULT(TEST_FLAG_HEADER); namespace { using absl::base_internal::ScopedSetEnv; struct UDT { UDT() = default; UDT(const UDT&) = default; UDT& operator=(const UDT&) = default; UDT(int v) : value(v) {} int value; }; bool AbslParseFlag(absl::string_view in, UDT* udt, std::string* err) { if (in == "A") { udt->value = 1; return true; } if (in == "AAA") { udt->value = 10; return true; } *err = "Use values A, AAA instead"; return false; } std::string AbslUnparseFlag(const UDT& udt) { return udt.value == 1 ? "A" : "AAA"; } std::string GetTestTmpDirEnvVar(const char* const env_var_name) { #ifdef _WIN32 char buf[MAX_PATH]; auto get_res = GetEnvironmentVariableA(env_var_name, buf, sizeof(buf)); if (get_res >= sizeof(buf) || get_res == 0) { return ""; } return std::string(buf, get_res); #else const char* val = ::getenv(env_var_name); if (val == nullptr) { return ""; } return val; #endif } const std::string& GetTestTempDir() { static std::string* temp_dir_name = []() -> std::string* { std::string* res = new std::string(GetTestTmpDirEnvVar("TEST_TMPDIR")); if (res->empty()) { *res = GetTestTmpDirEnvVar("TMPDIR"); } if (res->empty()) { #ifdef _WIN32 char temp_path_buffer[MAX_PATH]; auto len = GetTempPathA(MAX_PATH, temp_path_buffer); if (len < MAX_PATH && len != 0) { std::string temp_dir_name = temp_path_buffer; if (!absl::EndsWith(temp_dir_name, "\\")) { temp_dir_name.push_back('\\'); } absl::StrAppend(&temp_dir_name, "parse_test.", GetCurrentProcessId()); if (CreateDirectoryA(temp_dir_name.c_str(), nullptr)) { *res = temp_dir_name; } } #else char temp_dir_template[] = "/tmp/parse_test.XXXXXX"; if (auto* unique_name = ::mkdtemp(temp_dir_template)) { *res = unique_name; } #endif } if (res->empty()) { LOG(FATAL) << "Failed to make temporary directory for data files"; } #ifdef _WIN32 *res += "\\"; #else *res += "/"; #endif return res; }(); return *temp_dir_name; } struct FlagfileData { const absl::string_view file_name; const absl::Span<const char* const> file_lines; }; constexpr const char* const ff1_data[] = { "# comment ", " # comment ", "", " ", "--int_flag=-1", " --string_flag=q2w2 ", " ## ", " --double_flag=0.1", "--bool_flag=Y " }; constexpr const char* const ff2_data[] = { "# Setting legacy flag", "--legacy_int=1111", "--legacy_bool", "--nobool_flag", "--legacy_str=aqsw", "--int_flag=100", " ## =============" }; const char* GetFlagfileFlag(const std::vector<FlagfileData>& ffd, std::string& flagfile_flag) { flagfile_flag = "--flagfile="; absl::string_view separator; for (const auto& flagfile_data : ffd) { std::string flagfile_name = 
absl::StrCat(GetTestTempDir(), flagfile_data.file_name); std::ofstream flagfile_out(flagfile_name); for (auto line : flagfile_data.file_lines) { flagfile_out << absl::Substitute(line, GetTestTempDir()) << "\n"; } absl::StrAppend(&flagfile_flag, separator, flagfile_name); separator = ","; } return flagfile_flag.c_str(); } } ABSL_FLAG(int, int_flag, 1, ""); ABSL_FLAG(double, double_flag, 1.1, ""); ABSL_FLAG(std::string, string_flag, "a", ""); ABSL_FLAG(bool, bool_flag, false, ""); ABSL_FLAG(UDT, udt_flag, -1, ""); ABSL_RETIRED_FLAG(int, legacy_int, 1, ""); ABSL_RETIRED_FLAG(bool, legacy_bool, false, ""); ABSL_RETIRED_FLAG(std::string, legacy_str, "l", ""); namespace { namespace flags = absl::flags_internal; using testing::AllOf; using testing::ElementsAreArray; using testing::HasSubstr; class ParseTest : public testing::Test { public: ~ParseTest() override { flags::SetFlagsHelpMode(flags::HelpMode::kNone); } void SetUp() override { #if ABSL_FLAGS_STRIP_NAMES GTEST_SKIP() << "This test requires flag names to be present"; #endif } private: absl::FlagSaver flag_saver_; }; template <int N> flags::HelpMode InvokeParseAbslOnlyImpl(const char* (&in_argv)[N]) { std::vector<char*> positional_args; std::vector<absl::UnrecognizedFlag> unrecognized_flags; return flags::ParseAbseilFlagsOnlyImpl(N, const_cast<char**>(in_argv), positional_args, unrecognized_flags, flags::UsageFlagsAction::kHandleUsage); } template <int N> void InvokeParseAbslOnly(const char* (&in_argv)[N]) { std::vector<char*> positional_args; std::vector<absl::UnrecognizedFlag> unrecognized_flags; absl::ParseAbseilFlagsOnly(2, const_cast<char**>(in_argv), positional_args, unrecognized_flags); } template <int N> std::vector<char*> InvokeParseCommandLineImpl(const char* (&in_argv)[N]) { return flags::ParseCommandLineImpl( N, const_cast<char**>(in_argv), flags::UsageFlagsAction::kHandleUsage, flags::OnUndefinedFlag::kAbortIfUndefined, std::cerr); } template <int N> std::vector<char*> InvokeParse(const char* (&in_argv)[N]) { return absl::ParseCommandLine(N, const_cast<char**>(in_argv)); } template <int N> void TestParse(const char* (&in_argv)[N], int int_flag_value, double double_flag_val, absl::string_view string_flag_val, bool bool_flag_val, int exp_position_args = 0) { auto out_args = InvokeParse(in_argv); EXPECT_EQ(out_args.size(), 1 + exp_position_args); EXPECT_STREQ(out_args[0], "testbin"); EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), int_flag_value); EXPECT_NEAR(absl::GetFlag(FLAGS_double_flag), double_flag_val, 0.0001); EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), string_flag_val); EXPECT_EQ(absl::GetFlag(FLAGS_bool_flag), bool_flag_val); } TEST_F(ParseTest, TestEmptyArgv) { const char* in_argv[] = {"testbin"}; auto out_args = InvokeParse(in_argv); EXPECT_EQ(out_args.size(), 1); EXPECT_STREQ(out_args[0], "testbin"); } TEST_F(ParseTest, TestValidIntArg) { const char* in_args1[] = { "testbin", "--int_flag=10", }; TestParse(in_args1, 10, 1.1, "a", false); const char* in_args2[] = { "testbin", "-int_flag=020", }; TestParse(in_args2, 20, 1.1, "a", false); const char* in_args3[] = { "testbin", "--int_flag", "-30", }; TestParse(in_args3, -30, 1.1, "a", false); const char* in_args4[] = { "testbin", "-int_flag", "0x21", }; TestParse(in_args4, 33, 1.1, "a", false); } TEST_F(ParseTest, TestValidDoubleArg) { const char* in_args1[] = { "testbin", "--double_flag=2.3", }; TestParse(in_args1, 1, 2.3, "a", false); const char* in_args2[] = { "testbin", "--double_flag=0x1.2", }; TestParse(in_args2, 1, 1.125, "a", false); const char* in_args3[] = { "testbin", 
"--double_flag", "99.7", }; TestParse(in_args3, 1, 99.7, "a", false); const char* in_args4[] = { "testbin", "--double_flag", "0x20.1", }; TestParse(in_args4, 1, 32.0625, "a", false); } TEST_F(ParseTest, TestValidStringArg) { const char* in_args1[] = { "testbin", "--string_flag=aqswde", }; TestParse(in_args1, 1, 1.1, "aqswde", false); const char* in_args2[] = { "testbin", "-string_flag=a=b=c", }; TestParse(in_args2, 1, 1.1, "a=b=c", false); const char* in_args3[] = { "testbin", "--string_flag", "zaxscd", }; TestParse(in_args3, 1, 1.1, "zaxscd", false); const char* in_args4[] = { "testbin", "-string_flag", "--int_flag", }; TestParse(in_args4, 1, 1.1, "--int_flag", false); const char* in_args5[] = { "testbin", "--string_flag", "--no_a_flag=11", }; TestParse(in_args5, 1, 1.1, "--no_a_flag=11", false); } TEST_F(ParseTest, TestValidBoolArg) { const char* in_args1[] = { "testbin", "--bool_flag", }; TestParse(in_args1, 1, 1.1, "a", true); const char* in_args2[] = { "testbin", "--nobool_flag", }; TestParse(in_args2, 1, 1.1, "a", false); const char* in_args3[] = { "testbin", "--bool_flag=true", }; TestParse(in_args3, 1, 1.1, "a", true); const char* in_args4[] = { "testbin", "-bool_flag=false", }; TestParse(in_args4, 1, 1.1, "a", false); } TEST_F(ParseTest, TestValidUDTArg) { const char* in_args1[] = { "testbin", "--udt_flag=A", }; InvokeParse(in_args1); EXPECT_EQ(absl::GetFlag(FLAGS_udt_flag).value, 1); const char* in_args2[] = {"testbin", "--udt_flag", "AAA"}; InvokeParse(in_args2); EXPECT_EQ(absl::GetFlag(FLAGS_udt_flag).value, 10); } TEST_F(ParseTest, TestValidMultipleArg) { const char* in_args1[] = { "testbin", "--bool_flag", "--int_flag=2", "--double_flag=0.1", "--string_flag=asd", }; TestParse(in_args1, 2, 0.1, "asd", true); const char* in_args2[] = { "testbin", "--string_flag=", "--nobool_flag", "--int_flag", "-011", "--double_flag", "-1e-2", }; TestParse(in_args2, -11, -0.01, "", false); const char* in_args3[] = { "testbin", "--int_flag", "-0", "--string_flag", "\"\"", "--bool_flag=true", "--double_flag=1e18", }; TestParse(in_args3, 0, 1e18, "\"\"", true); } TEST_F(ParseTest, TestPositionalArgs) { const char* in_args1[] = { "testbin", "p1", "p2", }; TestParse(in_args1, 1, 1.1, "a", false, 2); auto out_args1 = InvokeParse(in_args1); EXPECT_STREQ(out_args1[1], "p1"); EXPECT_STREQ(out_args1[2], "p2"); const char* in_args2[] = { "testbin", "--int_flag=2", "p1", }; TestParse(in_args2, 2, 1.1, "a", false, 1); auto out_args2 = InvokeParse(in_args2); EXPECT_STREQ(out_args2[1], "p1"); const char* in_args3[] = {"testbin", "p1", "--int_flag=3", "p2", "--bool_flag", "true"}; TestParse(in_args3, 3, 1.1, "a", true, 3); auto out_args3 = InvokeParse(in_args3); EXPECT_STREQ(out_args3[1], "p1"); EXPECT_STREQ(out_args3[2], "p2"); EXPECT_STREQ(out_args3[3], "true"); const char* in_args4[] = { "testbin", "--", "p1", "p2", }; TestParse(in_args4, 3, 1.1, "a", true, 2); auto out_args4 = InvokeParse(in_args4); EXPECT_STREQ(out_args4[1], "p1"); EXPECT_STREQ(out_args4[2], "p2"); const char* in_args5[] = { "testbin", "p1", "--int_flag=4", "--", "--bool_flag", "false", "p2", }; TestParse(in_args5, 4, 1.1, "a", true, 4); auto out_args5 = InvokeParse(in_args5); EXPECT_STREQ(out_args5[1], "p1"); EXPECT_STREQ(out_args5[2], "--bool_flag"); EXPECT_STREQ(out_args5[3], "false"); EXPECT_STREQ(out_args5[4], "p2"); } using ParseDeathTest = ParseTest; TEST_F(ParseDeathTest, TestUndefinedArg) { const char* in_args1[] = { "testbin", "--undefined_flag", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "Unknown command line flag 
'undefined_flag'"); const char* in_args2[] = { "testbin", "--noprefixed_flag", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2), "Unknown command line flag 'noprefixed_flag'"); const char* in_args3[] = { "testbin", "--Int_flag=1", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3), "Unknown command line flag 'Int_flag'"); } TEST_F(ParseDeathTest, TestInvalidBoolFlagFormat) { const char* in_args1[] = { "testbin", "--bool_flag=", }; EXPECT_DEATH_IF_SUPPORTED( InvokeParse(in_args1), "Missing the value after assignment for the boolean flag 'bool_flag'"); const char* in_args2[] = { "testbin", "--nobool_flag=true", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2), "Negative form with assignment is not valid for the boolean " "flag 'bool_flag'"); } TEST_F(ParseDeathTest, TestInvalidNonBoolFlagFormat) { const char* in_args1[] = { "testbin", "--nostring_flag", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "Negative form is not valid for the flag 'string_flag'"); const char* in_args2[] = { "testbin", "--int_flag", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2), "Missing the value for the flag 'int_flag'"); } TEST_F(ParseDeathTest, TestInvalidUDTFlagFormat) { const char* in_args1[] = { "testbin", "--udt_flag=1", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "Illegal value '1' specified for flag 'udt_flag'; Use values A, " "AAA instead"); const char* in_args2[] = { "testbin", "--udt_flag", "AA", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2), "Illegal value 'AA' specified for flag 'udt_flag'; Use values " "A, AAA instead"); } TEST_F(ParseDeathTest, TestFlagSuggestions) { const char* in_args1[] = { "testbin", "--legacy_boo", }; EXPECT_DEATH_IF_SUPPORTED( InvokeParse(in_args1), "Unknown command line flag 'legacy_boo'. Did you mean: legacy_bool ?"); const char* in_args2[] = {"testbin", "--foo", "--undefok=foo1"}; EXPECT_DEATH_IF_SUPPORTED( InvokeParse(in_args2), "Unknown command line flag 'foo'. Did you mean: foo1 \\(undefok\\)?"); const char* in_args3[] = { "testbin", "--nolegacy_ino", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3), "Unknown command line flag 'nolegacy_ino'. 
Did " "you mean: nolegacy_bool, legacy_int ?"); } TEST_F(ParseTest, GetHints) { EXPECT_THAT(absl::flags_internal::GetMisspellingHints("legacy_boo"), testing::ContainerEq(std::vector<std::string>{"legacy_bool"})); EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_itn"), testing::ContainerEq(std::vector<std::string>{"legacy_int"})); EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int1"), testing::ContainerEq(std::vector<std::string>{"legacy_int"})); EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int"), testing::ContainerEq(std::vector<std::string>{"legacy_int"})); EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_ino"), testing::ContainerEq( std::vector<std::string>{"nolegacy_bool", "legacy_int"})); EXPECT_THAT( absl::flags_internal::GetMisspellingHints("FLAG_HEADER_000").size(), 100); } TEST_F(ParseTest, TestLegacyFlags) { const char* in_args1[] = { "testbin", "--legacy_int=11", }; TestParse(in_args1, 1, 1.1, "a", false); const char* in_args2[] = { "testbin", "--legacy_bool", }; TestParse(in_args2, 1, 1.1, "a", false); const char* in_args3[] = { "testbin", "--legacy_int", "22", "--int_flag=2", "--legacy_bool", "true", "--legacy_str", "--string_flag=qwe", }; TestParse(in_args3, 2, 1.1, "a", false, 1); } TEST_F(ParseTest, TestSimpleValidFlagfile) { std::string flagfile_flag; const char* in_args1[] = { "testbin", GetFlagfileFlag({{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}}, flagfile_flag), }; TestParse(in_args1, -1, 0.1, "q2w2 ", true); const char* in_args2[] = { "testbin", GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)}}, flagfile_flag), }; TestParse(in_args2, 100, 0.1, "q2w2 ", false); } TEST_F(ParseTest, TestValidMultiFlagfile) { std::string flagfile_flag; const char* in_args1[] = { "testbin", GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)}, {"parse_test.ff1", absl::MakeConstSpan(ff1_data)}}, flagfile_flag), }; TestParse(in_args1, -1, 0.1, "q2w2 ", true); } TEST_F(ParseTest, TestFlagfileMixedWithRegularFlags) { std::string flagfile_flag; const char* in_args1[] = { "testbin", "--int_flag=3", GetFlagfileFlag({{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}}, flagfile_flag), "-double_flag=0.2"}; TestParse(in_args1, -1, 0.2, "q2w2 ", true); } TEST_F(ParseTest, TestFlagfileInFlagfile) { std::string flagfile_flag; constexpr const char* const ff3_data[] = { "--flagfile=$0/parse_test.ff1", "--flagfile=$0/parse_test.ff2", }; GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)}, {"parse_test.ff1", absl::MakeConstSpan(ff1_data)}}, flagfile_flag); const char* in_args1[] = { "testbin", GetFlagfileFlag({{"parse_test.ff3", absl::MakeConstSpan(ff3_data)}}, flagfile_flag), }; TestParse(in_args1, 100, 0.1, "q2w2 ", false); } TEST_F(ParseDeathTest, TestInvalidFlagfiles) { std::string flagfile_flag; constexpr const char* const ff4_data[] = { "--unknown_flag=10" }; const char* in_args1[] = { "testbin", GetFlagfileFlag({{"parse_test.ff4", absl::MakeConstSpan(ff4_data)}}, flagfile_flag), }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "Unknown command line flag 'unknown_flag'"); constexpr const char* const ff5_data[] = { "--int_flag 10", }; const char* in_args2[] = { "testbin", GetFlagfileFlag({{"parse_test.ff5", absl::MakeConstSpan(ff5_data)}}, flagfile_flag), }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2), "Unknown command line flag 'int_flag 10'"); constexpr const char* const ff6_data[] = { "--int_flag=10", "--", "arg1", "arg2", "arg3", }; const char* in_args3[] = { "testbin", 
GetFlagfileFlag({{"parse_test.ff6", absl::MakeConstSpan(ff6_data)}}, flagfile_flag), }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3), "Flagfile can't contain position arguments or --"); const char* in_args4[] = { "testbin", "--flagfile=invalid_flag_file", }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args4), "Can't open flagfile invalid_flag_file"); constexpr const char* const ff7_data[] = { "--int_flag=10", "*bin*", "--str_flag=aqsw", }; const char* in_args5[] = { "testbin", GetFlagfileFlag({{"parse_test.ff7", absl::MakeConstSpan(ff7_data)}}, flagfile_flag), }; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args5), "Unexpected line in the flagfile .*: \\*bin\\*"); } TEST_F(ParseTest, TestReadingRequiredFlagsFromEnv) { const char* in_args1[] = {"testbin", "--fromenv=int_flag,bool_flag,string_flag"}; ScopedSetEnv set_int_flag("FLAGS_int_flag", "33"); ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "True"); ScopedSetEnv set_string_flag("FLAGS_string_flag", "AQ12"); TestParse(in_args1, 33, 1.1, "AQ12", true); } TEST_F(ParseDeathTest, TestReadingUnsetRequiredFlagsFromEnv) { const char* in_args1[] = {"testbin", "--fromenv=int_flag"}; EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "FLAGS_int_flag not found in environment"); } TEST_F(ParseDeathTest, TestRecursiveFlagsFromEnv) { const char* in_args1[] = {"testbin", "--fromenv=tryfromenv"}; ScopedSetEnv set_tryfromenv("FLAGS_tryfromenv", "int_flag"); EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1), "Infinite recursion on flag tryfromenv"); } TEST_F(ParseTest, TestReadingOptionalFlagsFromEnv) { const char* in_args1[] = { "testbin", "--tryfromenv=int_flag,bool_flag,string_flag,other_flag"}; ScopedSetEnv set_int_flag("FLAGS_int_flag", "17"); ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "Y"); TestParse(in_args1, 17, 1.1, "a", true); } TEST_F(ParseTest, TestReadingFlagsFromEnvMoxedWithRegularFlags) { const char* in_args1[] = { "testbin", "--bool_flag=T", "--tryfromenv=int_flag,bool_flag", "--int_flag=-21", }; ScopedSetEnv set_int_flag("FLAGS_int_flag", "-15"); ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "F"); TestParse(in_args1, -21, 1.1, "a", false); } TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) { const char* in_args1[] = { "testbin", "--help", }; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kImportant); EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(1), ""); const char* in_args2[] = { "testbin", "--help", "--int_flag=3", }; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kImportant); EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3); const char* in_args3[] = {"testbin", "--help", "some_positional_arg"}; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args3), flags::HelpMode::kImportant); } TEST_F(ParseTest, TestSubstringHelpFlagHandling) { const char* in_args1[] = { "testbin", "--help=abcd", }; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kMatch); EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd"); } TEST_F(ParseDeathTest, TestVersionHandling) { const char* in_args1[] = { "testbin", "--version", }; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kVersion); } TEST_F(ParseTest, TestCheckArgsHandling) { const char* in_args1[] = {"testbin", "--only_check_args", "--int_flag=211"}; EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kOnlyCheckArgs); EXPECT_EXIT(InvokeParseAbslOnly(in_args1), testing::ExitedWithCode(0), ""); EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(0), ""); const char* in_args2[] = {"testbin", "--only_check_args", "--unknown_flag=a"}; 
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kOnlyCheckArgs); EXPECT_EXIT(InvokeParseAbslOnly(in_args2), testing::ExitedWithCode(0), ""); EXPECT_EXIT(InvokeParse(in_args2), testing::ExitedWithCode(1), ""); } TEST_F(ParseTest, WasPresentOnCommandLine) { const char* in_args1[] = { "testbin", "arg1", "--bool_flag", "--int_flag=211", "arg2", "--double_flag=1.1", "--string_flag", "asd", "--", "--some_flag", "arg4", }; InvokeParse(in_args1); EXPECT_TRUE(flags::WasPresentOnCommandLine("bool_flag")); EXPECT_TRUE(flags::WasPresentOnCommandLine("int_flag")); EXPECT_TRUE(flags::WasPresentOnCommandLine("double_flag")); EXPECT_TRUE(flags::WasPresentOnCommandLine("string_flag")); EXPECT_FALSE(flags::WasPresentOnCommandLine("some_flag")); EXPECT_FALSE(flags::WasPresentOnCommandLine("another_flag")); } TEST_F(ParseTest, ParseAbseilFlagsOnlySuccess) { const char* in_args[] = { "testbin", "arg1", "--bool_flag", "--int_flag=211", "arg2", "--double_flag=1.1", "--undef_flag1", "--undef_flag2=123", "--string_flag", "asd", "--", "--some_flag", "arg4", }; std::vector<char*> positional_args; std::vector<absl::UnrecognizedFlag> unrecognized_flags; absl::ParseAbseilFlagsOnly(13, const_cast<char**>(in_args), positional_args, unrecognized_flags); EXPECT_THAT(positional_args, ElementsAreArray( {absl::string_view("testbin"), absl::string_view("arg1"), absl::string_view("arg2"), absl::string_view("--some_flag"), absl::string_view("arg4")})); EXPECT_THAT(unrecognized_flags, ElementsAreArray( {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv, "undef_flag1"), absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv, "undef_flag2")})); } TEST_F(ParseDeathTest, ParseAbseilFlagsOnlyFailure) { const char* in_args[] = { "testbin", "--int_flag=21.1", }; EXPECT_DEATH_IF_SUPPORTED( InvokeParseAbslOnly(in_args), "Illegal value '21.1' specified for flag 'int_flag'"); } TEST_F(ParseTest, UndefOkFlagsAreIgnored) { const char* in_args[] = { "testbin", "--undef_flag1", "--undef_flag2=123", "--undefok=undef_flag2", "--undef_flag3", "value", }; std::vector<char*> positional_args; std::vector<absl::UnrecognizedFlag> unrecognized_flags; absl::ParseAbseilFlagsOnly(6, const_cast<char**>(in_args), positional_args, unrecognized_flags); EXPECT_THAT(positional_args, ElementsAreArray({absl::string_view("testbin"), absl::string_view("value")})); EXPECT_THAT(unrecognized_flags, ElementsAreArray( {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv, "undef_flag1"), absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv, "undef_flag3")})); } TEST_F(ParseTest, AllUndefOkFlagsAreIgnored) { const char* in_args[] = { "testbin", "--undef_flag1", "--undef_flag2=123", "--undefok=undef_flag2,undef_flag1,undef_flag3", "--undef_flag3", "value", "--", "--undef_flag4", }; std::vector<char*> positional_args; std::vector<absl::UnrecognizedFlag> unrecognized_flags; absl::ParseAbseilFlagsOnly(8, const_cast<char**>(in_args), positional_args, unrecognized_flags); EXPECT_THAT(positional_args, ElementsAreArray({absl::string_view("testbin"), absl::string_view("value"), absl::string_view("--undef_flag4")})); EXPECT_THAT(unrecognized_flags, testing::IsEmpty()); } TEST_F(ParseDeathTest, ExitOnUnrecognizedFlagPrintsHelp) { const char* in_args[] = { "testbin", "--undef_flag1", "--help=int_flag", }; EXPECT_EXIT(InvokeParseCommandLineImpl(in_args), testing::ExitedWithCode(1), AllOf(HasSubstr("Unknown command line flag 'undef_flag1'"), HasSubstr("Try --helpfull to get a list of all flags"))); } }
bool CanIgnoreUndefinedFlag(absl::string_view flag_name) {
  auto undefok = absl::GetFlag(FLAGS_undefok);
  if (std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end()) {
    return true;
  }
  if (absl::ConsumePrefix(&flag_name, "no") &&
      std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end()) {
    return true;
  }
  return false;
}
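A standalone sketch of the same lookup, showing the effect of the "no" prefix handling; the real function reads FLAGS_undefok, and the helper below is hypothetical:

#include <algorithm>
#include <string>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"

// "--undefok=verbose" suppresses errors for both "verbose" and the negated
// boolean spelling "noverbose".
bool CanIgnore(absl::string_view flag_name,
               const std::vector<std::string>& undefok) {
  if (std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end()) {
    return true;
  }
  return absl::ConsumePrefix(&flag_name, "no") &&
         std::find(undefok.begin(), undefok.end(), flag_name) != undefok.end();
}
// CanIgnore("verbose", {"verbose"}) and CanIgnore("noverbose", {"verbose"})
// both return true; CanIgnore("verbos", {"verbose"}) returns false.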
TEST_F(ParseDeathTest, TestFlagSuggestions) {
  const char* in_args1[] = {
      "testbin",
      "--legacy_boo",
  };
  EXPECT_DEATH_IF_SUPPORTED(
      InvokeParse(in_args1),
      "Unknown command line flag 'legacy_boo'. Did you mean: legacy_bool ?");
  const char* in_args2[] = {"testbin", "--foo", "--undefok=foo1"};
  EXPECT_DEATH_IF_SUPPORTED(
      InvokeParse(in_args2),
      "Unknown command line flag 'foo'. Did you mean: foo1 \\(undefok\\)?");
  const char* in_args3[] = {
      "testbin",
      "--nolegacy_ino",
  };
  EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3),
                            "Unknown command line flag 'nolegacy_ino'. Did "
                            "you mean: nolegacy_bool, legacy_int ?");
}
#include "base/operators.h" #include <algorithm> #include <array> #include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/log/absl_check.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "base/internal/operators.h" namespace cel { namespace { using base_internal::OperatorData; struct OperatorDataNameComparer { using is_transparent = void; bool operator()(const OperatorData* lhs, const OperatorData* rhs) const { return lhs->name < rhs->name; } bool operator()(const OperatorData* lhs, absl::string_view rhs) const { return lhs->name < rhs; } bool operator()(absl::string_view lhs, const OperatorData* rhs) const { return lhs < rhs->name; } }; struct OperatorDataDisplayNameComparer { using is_transparent = void; bool operator()(const OperatorData* lhs, const OperatorData* rhs) const { return lhs->display_name < rhs->display_name; } bool operator()(const OperatorData* lhs, absl::string_view rhs) const { return lhs->display_name < rhs; } bool operator()(absl::string_view lhs, const OperatorData* rhs) const { return lhs < rhs->display_name; } }; #define CEL_OPERATORS_DATA(id, symbol, name, precedence, arity) \ ABSL_CONST_INIT const OperatorData id##_storage = { \ OperatorId::k##id, name, symbol, precedence, arity}; CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DATA) #undef CEL_OPERATORS_DATA #define CEL_OPERATORS_COUNT(id, symbol, name, precedence, arity) +1 using OperatorsArray = std::array<const OperatorData*, 0 + CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>; using UnaryOperatorsArray = std::array<const OperatorData*, 0 + CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>; using BinaryOperatorsArray = std::array<const OperatorData*, 0 + CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>; using TernaryOperatorsArray = std::array<const OperatorData*, 0 + CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>; #undef CEL_OPERATORS_COUNT ABSL_CONST_INIT absl::once_flag operators_once_flag; #define CEL_OPERATORS_DO(id, symbol, name, precedence, arity) &id##_storage, OperatorsArray operators_by_name = { CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DO)}; OperatorsArray operators_by_display_name = { CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DO)}; UnaryOperatorsArray unary_operators_by_name = { CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; UnaryOperatorsArray unary_operators_by_display_name = { CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; BinaryOperatorsArray binary_operators_by_name = { CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; BinaryOperatorsArray binary_operators_by_display_name = { CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; TernaryOperatorsArray ternary_operators_by_name = { CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; TernaryOperatorsArray ternary_operators_by_display_name = { CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)}; #undef CEL_OPERATORS_DO void InitializeOperators() { std::stable_sort(operators_by_name.begin(), operators_by_name.end(), OperatorDataNameComparer{}); std::stable_sort(operators_by_display_name.begin(), operators_by_display_name.end(), OperatorDataDisplayNameComparer{}); std::stable_sort(unary_operators_by_name.begin(), unary_operators_by_name.end(), OperatorDataNameComparer{}); std::stable_sort(unary_operators_by_display_name.begin(), unary_operators_by_display_name.end(), OperatorDataDisplayNameComparer{}); std::stable_sort(binary_operators_by_name.begin(), binary_operators_by_name.end(), OperatorDataNameComparer{}); 
std::stable_sort(binary_operators_by_display_name.begin(), binary_operators_by_display_name.end(), OperatorDataDisplayNameComparer{}); std::stable_sort(ternary_operators_by_name.begin(), ternary_operators_by_name.end(), OperatorDataNameComparer{}); std::stable_sort(ternary_operators_by_display_name.begin(), ternary_operators_by_display_name.end(), OperatorDataDisplayNameComparer{}); } } UnaryOperator::UnaryOperator(Operator op) : data_(op.data_) { ABSL_CHECK(op.arity() == Arity::kUnary); } BinaryOperator::BinaryOperator(Operator op) : data_(op.data_) { ABSL_CHECK(op.arity() == Arity::kBinary); } TernaryOperator::TernaryOperator(Operator op) : data_(op.data_) { ABSL_CHECK(op.arity() == Arity::kTernary); } #define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \ UnaryOperator Operator::id() { return UnaryOperator(&id##_storage); } CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR) #undef CEL_UNARY_OPERATOR #define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \ BinaryOperator Operator::id() { return BinaryOperator(&id##_storage); } CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR) #undef CEL_BINARY_OPERATOR #define CEL_TERNARY_OPERATOR(id, symbol, name, precedence, arity) \ TernaryOperator Operator::id() { return TernaryOperator(&id##_storage); } CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_TERNARY_OPERATOR) #undef CEL_TERNARY_OPERATOR absl::optional<Operator> Operator::FindByName(absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(operators_by_name.cbegin(), operators_by_name.cend(), input, OperatorDataNameComparer{}); if (it == operators_by_name.cend() || (*it)->name != input) { return absl::nullopt; } return Operator(*it); } absl::optional<Operator> Operator::FindByDisplayName(absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(operators_by_display_name.cbegin(), operators_by_display_name.cend(), input, OperatorDataDisplayNameComparer{}); if (it == operators_by_display_name.cend() || (*it)->display_name != input) { return absl::nullopt; } return Operator(*it); } absl::optional<UnaryOperator> UnaryOperator::FindByName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(unary_operators_by_name.cbegin(), unary_operators_by_name.cend(), input, OperatorDataNameComparer{}); if (it == unary_operators_by_name.cend() || (*it)->name != input) { return absl::nullopt; } return UnaryOperator(*it); } absl::optional<UnaryOperator> UnaryOperator::FindByDisplayName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(unary_operators_by_display_name.cbegin(), unary_operators_by_display_name.cend(), input, OperatorDataDisplayNameComparer{}); if (it == unary_operators_by_display_name.cend() || (*it)->display_name != input) { return absl::nullopt; } return UnaryOperator(*it); } absl::optional<BinaryOperator> BinaryOperator::FindByName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(binary_operators_by_name.cbegin(), binary_operators_by_name.cend(), input, OperatorDataNameComparer{}); if (it == binary_operators_by_name.cend() || (*it)->name != input) { return
absl::nullopt; } return BinaryOperator(*it); } absl::optional<BinaryOperator> BinaryOperator::FindByDisplayName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(binary_operators_by_display_name.cbegin(), binary_operators_by_display_name.cend(), input, OperatorDataDisplayNameComparer{}); if (it == binary_operators_by_display_name.cend() || (*it)->display_name != input) { return absl::nullopt; } return BinaryOperator(*it); } absl::optional<TernaryOperator> TernaryOperator::FindByName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(ternary_operators_by_name.cbegin(), ternary_operators_by_name.cend(), input, OperatorDataNameComparer{}); if (it == ternary_operators_by_name.cend() || (*it)->name != input) { return absl::nullopt; } return TernaryOperator(*it); } absl::optional<TernaryOperator> TernaryOperator::FindByDisplayName( absl::string_view input) { absl::call_once(operators_once_flag, InitializeOperators); if (input.empty()) { return absl::nullopt; } auto it = std::lower_bound(ternary_operators_by_display_name.cbegin(), ternary_operators_by_display_name.cend(), input, OperatorDataDisplayNameComparer{}); if (it == ternary_operators_by_display_name.cend() || (*it)->display_name != input) { return absl::nullopt; } return TernaryOperator(*it); } }
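A brief usage sketch of the lookup API defined above, consistent with the tests that follow: canonical names such as "_-_" resolve through FindByName, while display names such as "-" resolve through FindByDisplayName.

#include "absl/types/optional.h"
#include "base/operators.h"

void OperatorLookupExample() {
  // "_-_" is the canonical name of binary subtraction; "-" is its display name.
  absl::optional<cel::BinaryOperator> sub =
      cel::BinaryOperator::FindByName("_-_");
  absl::optional<cel::Operator> also_sub =
      cel::Operator::FindByDisplayName("-");
  // Unknown or empty inputs yield absl::nullopt.
  absl::optional<cel::Operator> none = cel::Operator::FindByName("");
  (void)sub;
  (void)also_sub;
  (void)none;
}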
#include "base/operators.h" #include <type_traits> #include "absl/hash/hash_testing.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "base/internal/operators.h" #include "internal/testing.h" namespace cel { namespace { using testing::Eq; using testing::Optional; template <typename Op, typename OpId> void TestOperator(Op op, OpId id, absl::string_view name, absl::string_view display_name, int precedence, Arity arity) { EXPECT_EQ(op.id(), id); EXPECT_EQ(Operator(op).id(), static_cast<OperatorId>(id)); EXPECT_EQ(op.name(), name); EXPECT_EQ(op.display_name(), display_name); EXPECT_EQ(op.precedence(), precedence); EXPECT_EQ(op.arity(), arity); EXPECT_EQ(Operator(op).arity(), arity); EXPECT_EQ(Op(Operator(op)), op); } void TestUnaryOperator(UnaryOperator op, UnaryOperatorId id, absl::string_view name, absl::string_view display_name, int precedence) { TestOperator(op, id, name, display_name, precedence, Arity::kUnary); } void TestBinaryOperator(BinaryOperator op, BinaryOperatorId id, absl::string_view name, absl::string_view display_name, int precedence) { TestOperator(op, id, name, display_name, precedence, Arity::kBinary); } void TestTernaryOperator(TernaryOperator op, TernaryOperatorId id, absl::string_view name, absl::string_view display_name, int precedence) { TestOperator(op, id, name, display_name, precedence, Arity::kTernary); } TEST(Operator, TypeTraits) { EXPECT_FALSE(std::is_default_constructible_v<Operator>); EXPECT_TRUE(std::is_copy_constructible_v<Operator>); EXPECT_TRUE(std::is_move_constructible_v<Operator>); EXPECT_TRUE(std::is_copy_assignable_v<Operator>); EXPECT_TRUE(std::is_move_assignable_v<Operator>); EXPECT_FALSE((std::is_convertible_v<Operator, UnaryOperator>)); EXPECT_FALSE((std::is_convertible_v<Operator, BinaryOperator>)); EXPECT_FALSE((std::is_convertible_v<Operator, TernaryOperator>)); } TEST(UnaryOperator, TypeTraits) { EXPECT_FALSE(std::is_default_constructible_v<UnaryOperator>); EXPECT_TRUE(std::is_copy_constructible_v<UnaryOperator>); EXPECT_TRUE(std::is_move_constructible_v<UnaryOperator>); EXPECT_TRUE(std::is_copy_assignable_v<UnaryOperator>); EXPECT_TRUE(std::is_move_assignable_v<UnaryOperator>); EXPECT_TRUE((std::is_convertible_v<UnaryOperator, Operator>)); } TEST(BinaryOperator, TypeTraits) { EXPECT_FALSE(std::is_default_constructible_v<BinaryOperator>); EXPECT_TRUE(std::is_copy_constructible_v<BinaryOperator>); EXPECT_TRUE(std::is_move_constructible_v<BinaryOperator>); EXPECT_TRUE(std::is_copy_assignable_v<BinaryOperator>); EXPECT_TRUE(std::is_move_assignable_v<BinaryOperator>); EXPECT_TRUE((std::is_convertible_v<BinaryOperator, Operator>)); } TEST(TernaryOperator, TypeTraits) { EXPECT_FALSE(std::is_default_constructible_v<TernaryOperator>); EXPECT_TRUE(std::is_copy_constructible_v<TernaryOperator>); EXPECT_TRUE(std::is_move_constructible_v<TernaryOperator>); EXPECT_TRUE(std::is_copy_assignable_v<TernaryOperator>); EXPECT_TRUE(std::is_move_assignable_v<TernaryOperator>); EXPECT_TRUE((std::is_convertible_v<TernaryOperator, Operator>)); } #define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \ TEST(UnaryOperator, id) { \ TestUnaryOperator(UnaryOperator::id(), UnaryOperatorId::k##id, name, \ symbol, precedence); \ } CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR) #undef CEL_UNARY_OPERATOR #define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \ TEST(BinaryOperator, id) { \ TestBinaryOperator(BinaryOperator::id(), BinaryOperatorId::k##id, name, \ symbol, precedence); \ } 
CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR) #undef CEL_BINARY_OPERATOR #define CEL_TERNARY_OPERATOR(id, symbol, name, precedence, arity) \ TEST(TernaryOperator, id) { \ TestTernaryOperator(TernaryOperator::id(), TernaryOperatorId::k##id, name, \ symbol, precedence); \ } CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_TERNARY_OPERATOR) #undef CEL_TERNARY_OPERATOR TEST(Operator, FindByName) { EXPECT_THAT(Operator::FindByName("@in"), Optional(Eq(Operator::In()))); EXPECT_THAT(Operator::FindByName("_in_"), Optional(Eq(Operator::OldIn()))); EXPECT_THAT(Operator::FindByName("in"), Eq(absl::nullopt)); EXPECT_THAT(Operator::FindByName(""), Eq(absl::nullopt)); } TEST(Operator, FindByDisplayName) { EXPECT_THAT(Operator::FindByDisplayName("-"), Optional(Eq(Operator::Subtract()))); EXPECT_THAT(Operator::FindByDisplayName("@in"), Eq(absl::nullopt)); EXPECT_THAT(Operator::FindByDisplayName(""), Eq(absl::nullopt)); } TEST(UnaryOperator, FindByName) { EXPECT_THAT(UnaryOperator::FindByName("-_"), Optional(Eq(Operator::Negate()))); EXPECT_THAT(UnaryOperator::FindByName("_-_"), Eq(absl::nullopt)); EXPECT_THAT(UnaryOperator::FindByName(""), Eq(absl::nullopt)); } TEST(UnaryOperator, FindByDisplayName) { EXPECT_THAT(UnaryOperator::FindByDisplayName("-"), Optional(Eq(Operator::Negate()))); EXPECT_THAT(UnaryOperator::FindByDisplayName("&&"), Eq(absl::nullopt)); EXPECT_THAT(UnaryOperator::FindByDisplayName(""), Eq(absl::nullopt)); } TEST(BinaryOperator, FindByName) { EXPECT_THAT(BinaryOperator::FindByName("_-_"), Optional(Eq(Operator::Subtract()))); EXPECT_THAT(BinaryOperator::FindByName("-_"), Eq(absl::nullopt)); EXPECT_THAT(BinaryOperator::FindByName(""), Eq(absl::nullopt)); } TEST(BinaryOperator, FindByDisplayName) { EXPECT_THAT(BinaryOperator::FindByDisplayName("-"), Optional(Eq(Operator::Subtract()))); EXPECT_THAT(BinaryOperator::FindByDisplayName("!"), Eq(absl::nullopt)); EXPECT_THAT(BinaryOperator::FindByDisplayName(""), Eq(absl::nullopt)); } TEST(TernaryOperator, FindByName) { EXPECT_THAT(TernaryOperator::FindByName("_?_:_"), Optional(Eq(TernaryOperator::Conditional()))); EXPECT_THAT(TernaryOperator::FindByName("-_"), Eq(absl::nullopt)); EXPECT_THAT(TernaryOperator::FindByName(""), Eq(absl::nullopt)); } TEST(TernaryOperator, FindByDisplayName) { EXPECT_THAT(TernaryOperator::FindByDisplayName(""), Eq(absl::nullopt)); EXPECT_THAT(TernaryOperator::FindByDisplayName("!"), Eq(absl::nullopt)); } TEST(Operator, SupportsAbslHash) { #define CEL_OPERATOR(id, symbol, name, precedence, arity) \ Operator(Operator::id()), EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( {CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATOR)})); #undef CEL_OPERATOR } TEST(UnaryOperator, SupportsAbslHash) { #define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \ UnaryOperator::id(), EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( {CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR)})); #undef CEL_UNARY_OPERATOR } TEST(BinaryOperator, SupportsAbslHash) { #define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \ BinaryOperator::id(), EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( {CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR)})); #undef CEL_BINARY_OPERATOR } } }
absl::optional<Operator> Operator::FindByDisplayName(absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (input.empty()) {
    return absl::nullopt;
  }
  auto it = std::lower_bound(operators_by_display_name.cbegin(),
                             operators_by_display_name.cend(), input,
                             OperatorDataDisplayNameComparer{});
  // Check against the end of the container that was actually searched; the
  // end iterator of a different array would never compare equal.
  if (it == operators_by_display_name.cend() || (*it)->display_name != input) {
    return absl::nullopt;
  }
  return Operator(*it);
}
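The same sorted-array-plus-binary-search pattern in miniature; the point to note is that the std::lower_bound result must be compared against the end iterator of the container that was searched before dereferencing. The helper and names below are illustrative, not part of the file above:

#include <algorithm>
#include <array>

#include "absl/strings/string_view.h"

bool Contains(const std::array<absl::string_view, 3>& sorted_names,
              absl::string_view input) {
  auto it =
      std::lower_bound(sorted_names.cbegin(), sorted_names.cend(), input);
  // Guard the dereference with the end of the searched container.
  return it != sorted_names.cend() && *it == input;
}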
TEST(Operator, FindByDisplayName) {
  EXPECT_THAT(Operator::FindByDisplayName("-"),
              Optional(Eq(Operator::Subtract())));
  EXPECT_THAT(Operator::FindByDisplayName("@in"), Eq(absl::nullopt));
  EXPECT_THAT(Operator::FindByDisplayName(""), Eq(absl::nullopt));
}
#include "tsl/lib/monitoring/sampler.h" #include "tsl/platform/platform.h" #ifdef IS_MOBILE_PLATFORM #else namespace tsl { namespace monitoring { namespace { class ExplicitBuckets : public Buckets { public: ~ExplicitBuckets() override = default; explicit ExplicitBuckets(std::vector<double> bucket_limits) : bucket_limits_(std::move(bucket_limits)) { CHECK_GT(bucket_limits_.size(), 0); for (size_t i = 1; i < bucket_limits_.size(); i++) { CHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]); } if (bucket_limits_.back() != DBL_MAX) { bucket_limits_.push_back(DBL_MAX); } } const std::vector<double>& explicit_bounds() const override { return bucket_limits_; } private: std::vector<double> bucket_limits_; ExplicitBuckets(const ExplicitBuckets&) = delete; void operator=(const ExplicitBuckets&) = delete; }; class ExponentialBuckets : public Buckets { public: ~ExponentialBuckets() override = default; ExponentialBuckets(double scale, double growth_factor, int bucket_count) : explicit_buckets_( ComputeBucketLimits(scale, growth_factor, bucket_count)) {} const std::vector<double>& explicit_bounds() const override { return explicit_buckets_.explicit_bounds(); } private: static std::vector<double> ComputeBucketLimits(double scale, double growth_factor, int bucket_count) { CHECK_GT(bucket_count, 0); std::vector<double> bucket_limits; double bound = scale; for (int i = 0; i < bucket_count; i++) { bucket_limits.push_back(bound); bound *= growth_factor; } return bucket_limits; } ExplicitBuckets explicit_buckets_; ExponentialBuckets(const ExponentialBuckets&) = delete; void operator=(const ExponentialBuckets&) = delete; }; } std::unique_ptr<Buckets> Buckets::Explicit(std::vector<double> bucket_limits) { return std::unique_ptr<Buckets>( new ExplicitBuckets(std::move(bucket_limits))); } std::unique_ptr<Buckets> Buckets::Explicit( std::initializer_list<double> bucket_limits) { return std::unique_ptr<Buckets>(new ExplicitBuckets(bucket_limits)); } std::unique_ptr<Buckets> Buckets::Exponential(double scale, double growth_factor, int bucket_count) { return std::unique_ptr<Buckets>( new ExponentialBuckets(scale, growth_factor, bucket_count)); } } } #endif
#include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace monitoring { namespace { using histogram::Histogram; void EqHistograms(const Histogram& expected, const HistogramProto& actual_proto) { Histogram actual; ASSERT_TRUE(actual.DecodeFromProto(actual_proto)); EXPECT_EQ(expected.ToString(), actual.ToString()); } auto* sampler_with_labels = Sampler<1>::New({"/tensorflow/test/sampler_with_labels", "Sampler with one label.", "MyLabel"}, Buckets::Explicit({10.0, 20.0})); TEST(LabeledSamplerTest, InitializedEmpty) { Histogram empty; EqHistograms(empty, sampler_with_labels->GetCell("Empty")->value()); } TEST(LabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({10.0, 20.0, DBL_MAX}); auto* cell = sampler_with_labels->GetCell("BucketBoundaries"); sampler_with_labels->GetCell("AddedToCheckPreviousCellValidity"); cell->Add(-1.0); expected.Add(-1.0); cell->Add(10.0); expected.Add(10.0); cell->Add(20.0); expected.Add(20.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } auto* init_sampler_without_labels = Sampler<0>::New({"/tensorflow/test/init_sampler_without_labels", "Sampler without labels initialized as empty."}, Buckets::Explicit(std::vector<double>{1.5, 2.8})); TEST(UnlabeledSamplerTest, InitializedEmpty) { Histogram empty; EqHistograms(empty, init_sampler_without_labels->GetCell()->value()); } auto* sampler_without_labels = Sampler<0>::New({"/tensorflow/test/sampler_without_labels", "Sampler without labels initialized as empty."}, Buckets::Explicit({1.5, 2.8})); TEST(UnlabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({1.5, 2.8, DBL_MAX}); auto* cell = sampler_without_labels->GetCell(); cell->Add(-1.0); expected.Add(-1.0); cell->Add(2.0); expected.Add(2.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } auto* sampler_with_exponential = Sampler<1>::New({"/tensorflow/test/sampler_with_exponential", "Sampler with exponential buckets.", "MyLabel"}, Buckets::Exponential(1, 2, 3)); TEST(ExponentialSamplerTest, ExponentialBucketBoundaries) { Histogram expected({1.0, 2.0, 4.0, DBL_MAX}); auto* cell = sampler_with_exponential->GetCell("BucketBoundaries"); sampler_with_exponential->GetCell("AddedToCheckPreviousCellValidity"); cell->Add(-1.0); expected.Add(-1.0); cell->Add(0.5); expected.Add(0.5); cell->Add(1.001); expected.Add(1.001); cell->Add(3.999); expected.Add(3.999); cell->Add(6.0); expected.Add(6.0); EqHistograms(expected, cell->value()); } TEST(ExplicitSamplerTest, SameName) { auto* same_sampler = Sampler<1>::New({"/tensorflow/test/sampler_with_labels", "Sampler with one label.", "MyLabel"}, Buckets::Explicit({10.0, 20.0})); EXPECT_TRUE(sampler_with_labels->GetStatus().ok()); EXPECT_TRUE(same_sampler->GetStatus().ok()); delete same_sampler; } } } }
std::unique_ptr<Buckets> Buckets::Explicit(std::vector<double> bucket_limits) { return std::unique_ptr<Buckets>( new ExplicitBuckets(std::move(bucket_limits))); }
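For comparison, the exponential variant defined earlier computes its limits geometrically: Buckets::Exponential(1, 2, 3) yields {1, 2, 4}, and the wrapped ExplicitBuckets appends DBL_MAX as a final catch-all bound. A standalone sketch of that computation:

#include <cfloat>
#include <vector>

std::vector<double> ExponentialBucketLimits(double scale, double growth_factor,
                                            int bucket_count) {
  std::vector<double> limits;
  double bound = scale;
  for (int i = 0; i < bucket_count; ++i) {
    limits.push_back(bound);  // scale * growth_factor^i
    bound *= growth_factor;
  }
  limits.push_back(DBL_MAX);  // mirrors the ExplicitBuckets constructor
  return limits;
}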
TEST(LabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({10.0, 20.0, DBL_MAX}); auto* cell = sampler_with_labels->GetCell("BucketBoundaries"); sampler_with_labels->GetCell("AddedToCheckPreviousCellValidity"); cell->Add(-1.0); expected.Add(-1.0); cell->Add(10.0); expected.Add(10.0); cell->Add(20.0); expected.Add(20.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } TEST(UnlabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({1.5, 2.8, DBL_MAX}); auto* cell = sampler_without_labels->GetCell(); cell->Add(-1.0); expected.Add(-1.0); cell->Add(2.0); expected.Add(2.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } TEST(ExplicitSamplerTest, SameName) { auto* same_sampler = Sampler<1>::New({"/tensorflow/test/sampler_with_labels", "Sampler with one label.", "MyLabel"}, Buckets::Explicit({10.0, 20.0})); EXPECT_TRUE(sampler_with_labels->GetStatus().ok()); EXPECT_TRUE(same_sampler->GetStatus().ok()); delete same_sampler; }
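A hedged usage sketch tying the pieces together, following the same calls the tests above make; the metric name, description, label, and label value below are hypothetical:

#include "tensorflow/core/lib/monitoring/sampler.h"

// Hypothetical metric: one label ("method"), explicit bucket boundaries.
auto* latency_sampler = tensorflow::monitoring::Sampler<1>::New(
    {"/my/app/latency", "Request latency in seconds.", "method"},
    tensorflow::monitoring::Buckets::Explicit({0.01, 0.1, 1.0}));

void RecordLatency(double seconds) {
  // "GetUser" is a hypothetical label value; each distinct value gets a cell.
  latency_sampler->GetCell("GetUser")->Add(seconds);
}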
#include "tensorflow/core/util/tensor_slice_reader.h" #include <climits> #include <memory> #include <utility> #include <vector> #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/versions.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/io/iterator.h" #include "tensorflow/core/lib/io/table.h" #include "tensorflow/core/lib/io/table_options.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" #include "tensorflow/core/util/tensor_slice_util.h" namespace tensorflow { namespace checkpoint { TensorSliceReader::Table::~Table() = default; namespace { class TensorSliceReaderTable : public TensorSliceReader::Table { public: explicit TensorSliceReaderTable(RandomAccessFile* f, table::Table* t) : file_(f), table_(t) {} ~TensorSliceReaderTable() override { delete table_; delete file_; } bool Get(const string& key, string* value) override { std::unique_ptr<table::Iterator> iter(table_->NewIterator()); iter->Seek(key); if (iter->Valid() && iter->key() == key) { StringPiece v = iter->value(); value->assign(v.data(), v.size()); return true; } else { return false; } } private: RandomAccessFile* file_; table::Table* table_; }; } Status OpenTableTensorSliceReader(const string& fname, TensorSliceReader::Table** result) { *result = nullptr; Env* env = Env::Default(); std::unique_ptr<RandomAccessFile> f; Status s = env->NewRandomAccessFile(fname, &f); if (s.ok()) { uint64 file_size; s = env->GetFileSize(fname, &file_size); if (s.ok()) { table::Options options; table::Table* table; s = table::Table::Open(options, f.get(), file_size, &table); if (s.ok()) { *result = new TensorSliceReaderTable(f.release(), table); return absl::OkStatus(); } else { s = errors::CreateWithUpdatedMessage( s, strings::StrCat(s.message(), ": perhaps your file is in a different " "file format and you need to use a " "different restore operator?")); } } } LOG(WARNING) << "Could not open " << fname << ": " << s; return s; } TensorSliceReader::TensorSliceReader(const string& filepattern) : TensorSliceReader(filepattern, OpenTableTensorSliceReader, kLoadAllShards) {} TensorSliceReader::TensorSliceReader(const string& filepattern, OpenTableFunction open_function) : TensorSliceReader(filepattern, std::move(open_function), kLoadAllShards) { } TensorSliceReader::TensorSliceReader(const string& filepattern, OpenTableFunction open_function, int preferred_shard) : filepattern_(filepattern), open_function_(std::move(open_function)) { VLOG(1) << "TensorSliceReader for " << filepattern; Status s = Env::Default()->GetMatchingPaths(filepattern, &fnames_); if (!s.ok()) { status_ = errors::InvalidArgument( "Unsuccessful TensorSliceReader constructor: " "Failed to get matching files on ", filepattern, ": ", s.ToString()); return; } if (fnames_.empty()) { status_ = errors::NotFound( "Unsuccessful TensorSliceReader constructor: " "Failed to find any matching files for ", filepattern); return; } sss_.resize(fnames_.size()); for (size_t shard = 0; shard < fnames_.size(); ++shard) { fname_to_index_.insert(std::make_pair(fnames_[shard], shard)); } if (preferred_shard == kLoadAllShards || fnames_.size() == 1 || static_cast<size_t>(preferred_shard) >= fnames_.size()) { LoadAllShards(); } else { VLOG(1) << "Loading shard " << preferred_shard << " for " << filepattern_; 
LoadShard(preferred_shard); } } void TensorSliceReader::LoadShard(int shard) const { CHECK_LT(shard, sss_.size()); if (sss_[shard] || !status_.ok()) { return; } string value; SavedTensorSlices sts; const string fname = fnames_[shard]; VLOG(1) << "Reading meta data from file " << fname << "..."; Table* table; Status s = open_function_(fname, &table); if (!s.ok()) { status_ = errors::DataLoss("Unable to open table file ", fname, ": ", s.ToString()); return; } sss_[shard].reset(table); if (!(table->Get(kSavedTensorSlicesKey, &value) && ParseProtoUnlimited(&sts, value))) { status_ = errors::Internal( "Failed to find the saved tensor slices at the beginning of the " "checkpoint file: ", fname); return; } status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION, TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint", "checkpoint"); if (!status_.ok()) return; for (const SavedSliceMeta& ssm : sts.meta().tensor()) { TensorShape ssm_shape; status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape); if (!status_.ok()) return; for (const TensorSliceProto& tsp : ssm.slice()) { TensorSlice ss_slice; status_ = TensorSlice::BuildTensorSlice(tsp, &ss_slice); if (!status_.ok()) return; status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname, ss_slice, &tensors_); if (!status_.ok()) return; } } } void TensorSliceReader::LoadAllShards() const { VLOG(1) << "Loading all shards for " << filepattern_; for (size_t i = 0; i < fnames_.size() && status_.ok(); ++i) { LoadShard(i); } all_shards_loaded_ = true; } const TensorSliceSet* TensorSliceReader::FindTensorSlice( const string& name, const TensorSlice& slice, std::vector<std::pair<TensorSlice, string>>* details) const { const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (tss && !tss->QueryMeta(slice, details)) { return nullptr; } return tss; } TensorSliceReader::~TensorSliceReader() { for (auto& temp : tensors_) { delete temp.second; } tensors_.clear(); } bool TensorSliceReader::HasTensor(const string& name, TensorShape* shape, DataType* type) const { mutex_lock l(mu_); const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (!tss && !all_shards_loaded_) { VLOG(1) << "Did not find tensor in preferred shard, loading all shards: " << name; LoadAllShards(); tss = gtl::FindPtrOrNull(tensors_, name); } if (tss) { if (shape) { *shape = tss->shape(); } if (type) { *type = tss->type(); } return true; } else { return false; } } Status TensorSliceReader::GetTensor( const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const { DataType type; TensorShape shape; TensorSlice slice; { mutex_lock l(mu_); const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (tss == nullptr) { return errors::NotFound(name, " not found in checkpoint file"); } if (tss->Slices().size() > 1) { return errors::Unimplemented("Sliced checkpoints are not supported"); } type = tss->type(); shape = tss->shape(); slice = tss->Slices().begin()->second.slice; } std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor); Status s = tensorflow::Tensor::BuildTensor(type, shape, t.get()); if (!s.ok()) return s; for (const auto d : shape.dim_sizes()) { if (d == LLONG_MAX) { return errors::InvalidArgument("Unable to read dimensions of size ", LLONG_MAX, ". 
Got shape: ", shape.DebugString()); } } bool success = false; #define READER_COPY(dt) \ case dt: \ success = CopySliceData(name, slice, \ t->flat<EnumToDataType<dt>::Type>().data()); \ break; switch (type) { READER_COPY(DT_FLOAT); READER_COPY(DT_DOUBLE); READER_COPY(DT_INT32); READER_COPY(DT_UINT8); READER_COPY(DT_INT16); READER_COPY(DT_INT8); READER_COPY(DT_INT64); READER_COPY(DT_STRING); READER_COPY(DT_BOOL); default: return errors::Unimplemented("Data type not supported"); } #undef READER_COPY if (!success) { return errors::NotFound(name, " not found in checkpoint file"); } std::swap(*out_tensor, t); return absl::OkStatus(); } TensorSliceReader::VarToShapeMap TensorSliceReader::GetVariableToShapeMap() const { VarToShapeMap name_to_shape; if (status().ok()) { for (auto& e : Tensors()) { name_to_shape[e.first] = e.second->shape(); } } return name_to_shape; } TensorSliceReader::VarToDataTypeMap TensorSliceReader::GetVariableToDataTypeMap() const { VarToDataTypeMap name_to_dtype; if (status().ok()) { for (auto& e : Tensors()) { name_to_dtype[e.first] = e.second->type(); } } return name_to_dtype; } const string TensorSliceReader::DebugString() const { string shape_str; if (status().ok()) { for (const auto& e : Tensors()) { strings::StrAppend(&shape_str, e.first, " (", DataType_Name(e.second->type()), ") ", e.second->shape().DebugString()); const int num_slices = e.second->Slices().size(); if (num_slices > 1) { strings::StrAppend(&shape_str, ", ", num_slices, " slices"); } strings::StrAppend(&shape_str, "\n"); } } return shape_str; } } }
#include "tensorflow/core/util/tensor_slice_reader.h" #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/io/iterator.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/table.h" #include "tensorflow/core/lib/io/table_builder.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/saved_tensor_slice.pb.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" #include "tensorflow/core/util/tensor_slice_writer.h" namespace tensorflow { namespace checkpoint { namespace { void SimpleFloatHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, TensorSliceReader::OpenTableFunction open_function) { const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint"); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const float data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const float data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } const string filepattern = strings::StrCat(fname_base, "_*"); TensorSliceReader reader(filepattern, std::move(open_function)); TF_EXPECT_OK(reader.status()); EXPECT_EQ(2, reader.num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DT_FLOAT, type); EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr)); } { TensorSlice s = TensorSlice::ParseOrDie("0,2:-"); float expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; float results[10]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,1:-"); float expected[] = {5, 6, 7, 8, 9}; float results[5]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 5; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3"); float results[6]; EXPECT_FALSE(reader.CopySliceData("test", s, results)); } } TEST(TensorSliceReaderTest, SimpleFloat) { SimpleFloatHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); } template <typename T, typename U> void SimpleIntXHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, TensorSliceReader::OpenTableFunction open_function, 
const string& checkpoint_file) { const string fname_base = io::JoinPath(testing::TmpDir(), checkpoint_file); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const T data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const T data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const T data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } const string filepattern = strings::StrCat(fname_base, "_*"); TensorSliceReader reader(filepattern, std::move(open_function)); TF_EXPECT_OK(reader.status()); EXPECT_EQ(2, reader.num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DataTypeToEnum<T>::v(), type); EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr)); } { TensorSlice s = TensorSlice::ParseOrDie("0,2:-"); T expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; U results[10]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,1:-"); T expected[] = {5, 6, 7, 8, 9}; U results[5]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 5; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3"); U results[6]; EXPECT_FALSE(reader.CopySliceData("test", s, results)); } } #define TEST_SIMPLE_INT(TYPE, SAVED_TYPE) \ TEST(TensorSliceReaderTest, Simple##TYPE) { \ SimpleIntXHelper<TYPE, SAVED_TYPE>(CreateTableTensorSliceBuilder, \ OpenTableTensorSliceReader, \ #TYPE "_checkpoint"); \ } TEST_SIMPLE_INT(int32, int32) TEST_SIMPLE_INT(int64_t, int64_t) TEST_SIMPLE_INT(int16, int32) TEST_SIMPLE_INT(int8, int32) TEST_SIMPLE_INT(uint8, int32) void MutateSavedTensorSlices( const std::string& fname, const std::function<std::string(SavedTensorSlices)>& mutator) { table::Options options; options.compression = table::kNoCompression; std::vector<std::pair<std::string, std::string>> entries; { std::unique_ptr<RandomAccessFile> file; TF_CHECK_OK(Env::Default()->NewRandomAccessFile(fname, &file)); uint64 file_size; TF_CHECK_OK(Env::Default()->GetFileSize(fname, &file_size)); table::Table* t; TF_CHECK_OK(table::Table::Open(options, file.get(), file_size, &t)); std::unique_ptr<table::Table> table(t); std::unique_ptr<table::Iterator> it(table->NewIterator()); for (it->Seek(""); it->Valid(); it->Next()) { entries.emplace_back(it->key(), it->value()); } TF_CHECK_OK(it->status()); } { std::unique_ptr<WritableFile> file; TF_CHECK_OK(Env::Default()->NewWritableFile(fname, &file)); table::TableBuilder builder(options, file.get()); for (const auto& entry : entries) { SavedTensorSlices sts; CHECK(sts.ParseFromString(entry.second)); builder.Add(entry.first, mutator(std::move(sts))); } TF_CHECK_OK(builder.Finish()); TF_CHECK_OK(file->Close()); } } TEST(TensorSliceReaderTest, MissingTensorType) { const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); 
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorShape shape({4, 5}); TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.clear_type(); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); TF_CHECK_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } TEST(TensorSliceReaderTest, UnsupportedTensorType) { const string fname = io::JoinPath(testing::TmpDir(), "int32_ref_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorShape shape({4, 5}); TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.set_type(DT_INT32_REF); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); TF_CHECK_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } TEST(TensorSliceReaderTest, NegativeTensorShapeDimension) { const string fname = io::JoinPath(testing::TmpDir(), "negative_dim_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { for (auto& dim : *tensor.mutable_shape()->mutable_dim()) { dim.set_size(-dim.size()); } } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); EXPECT_FALSE(reader.status().ok()); } TEST(TensorSliceReaderTest, InvalidTensorSlice) { const string fname = io::JoinPath(testing::TmpDir(), "invalid_slice_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.mutable_slice(0)->mutable_extent(0)->set_length(-10); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); EXPECT_FALSE(reader.status().ok()); } TEST(TensorSliceReaderTest, MissingTensorData) { const string fname = io::JoinPath(testing::TmpDir(), "missing_data_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_ASSERT_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_ASSERT_OK(writer.Finish()); MutateSavedTensorSlices(fname, [&](SavedTensorSlices sts) { if (sts.has_data()) { Fill(data, 4, sts.mutable_data()->mutable_data()); } return sts.SerializeAsString(); }); TensorSliceReader 
reader(fname, OpenTableTensorSliceReader); TF_ASSERT_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } void CachedTensorSliceReaderTesterHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, const TensorSliceReader::OpenTableFunction& open_function) { const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint"); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const float data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const float data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } TensorSliceReaderCache cache; const string filepattern = strings::StrCat(fname_base, "_*"); const TensorSliceReader* reader = cache.GetReader( filepattern, open_function, TensorSliceReader::kLoadAllShards); EXPECT_TRUE(reader != nullptr); EXPECT_EQ(2, reader->num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader->HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DT_FLOAT, type); EXPECT_FALSE(reader->HasTensor("don't exist", nullptr, nullptr)); } const TensorSliceReader* reader2 = cache.GetReader( filepattern, open_function, TensorSliceReader::kLoadAllShards); EXPECT_EQ(reader, reader2); reader = cache.GetReader("file_does_not_exist", open_function, TensorSliceReader::kLoadAllShards); EXPECT_TRUE(reader == nullptr); } TEST(CachedTensorSliceReaderTest, SimpleFloat) { CachedTensorSliceReaderTesterHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); } static void VersionTest(const VersionDef& versions, const string& error) { const string path = io::JoinPath(testing::TmpDir(), "checkpoint"); { SavedTensorSlices sts; *sts.mutable_meta()->mutable_versions() = versions; string contents; EXPECT_TRUE(sts.SerializeToString(&contents)); TensorSliceWriter::Builder* builder; TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder)); builder->Add(kSavedTensorSlicesKey, contents); int64_t file_size; TF_EXPECT_OK(builder->Finish(&file_size)); delete builder; } TensorSliceReader reader(path, OpenTableTensorSliceReader); EXPECT_TRUE(reader.status().code() == error::INVALID_ARGUMENT && absl::StartsWith(reader.status().message(), error)) << "Expected error starting with '" << errors::InvalidArgument(error) << "', got '" << reader.status() << "'"; } TEST(CheckpointVersionTest, MinConsumer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION + 1); versions.set_min_consumer(TF_CHECKPOINT_VERSION + 1); VersionTest( versions, strings::StrCat("Checkpoint min consumer version ", TF_CHECKPOINT_VERSION + 1, " above current version ", TF_CHECKPOINT_VERSION, " for TensorFlow")); } TEST(CheckpointVersionTest, MinProducer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1); VersionTest(versions, strings::StrCat("Checkpoint producer version ", TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1, " below min 
producer ", TF_CHECKPOINT_VERSION_MIN_PRODUCER, " supported by TensorFlow")); } TEST(CheckpointVersionTest, BadConsumer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION + 1); versions.add_bad_consumers(TF_CHECKPOINT_VERSION); VersionTest( versions, strings::StrCat( "Checkpoint disallows consumer version ", TF_CHECKPOINT_VERSION, ". Please upgrade TensorFlow: this version is likely buggy.")); } } } }
bool TensorSliceReader::HasTensor(const string& name, TensorShape* shape, DataType* type) const { mutex_lock l(mu_); const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (!tss && !all_shards_loaded_) { VLOG(1) << "Did not find tensor in preferred shard, loading all shards: " << name; LoadAllShards(); tss = gtl::FindPtrOrNull(tensors_, name); } if (tss) { if (shape) { *shape = tss->shape(); } if (type) { *type = tss->type(); } return true; } else { return false; } }
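gtl::FindPtrOrNull, used twice above, returns the mapped pointer or a null value when the key is absent. A minimal generic stand-in with the same contract:

template <typename Map>
typename Map::mapped_type FindPtrOrNull(const Map& m,
                                        const typename Map::key_type& key) {
  auto it = m.find(key);
  // Value-initialized mapped_type, i.e. nullptr for pointer maps.
  if (it == m.end()) return typename Map::mapped_type();
  return it->second;
}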
TEST(TensorSliceReaderTest, SimpleFloat) { SimpleFloatHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); } TEST(TensorSliceReaderTest, MissingTensorType) { const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorShape shape({4, 5}); TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.clear_type(); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); TF_CHECK_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } TEST(CachedTensorSliceReaderTest, SimpleFloat) { CachedTensorSliceReaderTesterHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); }
#include "tensorflow/c/eager/parallel_device/parallel_device.h" #include <cstring> #include <memory> #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "absl/types/variant.h" #include "tensorflow/c/c_api.h" #include "tensorflow/c/eager/c_api.h" #include "tensorflow/c/eager/c_api_experimental.h" #include "tensorflow/c/eager/parallel_device/parallel_device_lib.h" #include "tensorflow/c/eager/tfe_tensorhandle_internal.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/c/tf_status_helper.h" namespace tensorflow { namespace parallel_device { namespace { class OpDeleter { public: void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); } }; using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>; using MaybeParallelTensorOwned = absl::variant<std::unique_ptr<ParallelTensor>, TensorHandlePtr>; using MaybeParallelTensorUnowned = absl::variant<ParallelTensor*, TFE_TensorHandle*>; class NamedParallelDevice { public: NamedParallelDevice(const std::string& name, std::unique_ptr<ParallelDevice> parallel_device) : device_name_(name), parallel_device_(std::move(parallel_device)) {} const std::string& name() const { return device_name_; } const ParallelDevice& device() const { return *parallel_device_; } private: std::string device_name_; std::unique_ptr<ParallelDevice> parallel_device_; }; absl::optional<std::vector<MaybeParallelTensorOwned>> ExecuteWithSpecialOps( const ParallelDevice& parallel_device, const std::string& parallel_device_name, TFE_Context* context, std::vector<MaybeParallelTensorUnowned> inputs, const char* operation_name, const TFE_OpAttrs* attributes, int expected_max_outputs, TF_Status* status) { absl::optional<std::vector<MaybeParallelTensorOwned>> result; if (operation_name == std::string("TPUReplicatedInput")) { if (inputs.size() != parallel_device.num_underlying_devices()) { std::string message(absl::StrCat( "The parallel device ", parallel_device_name, " expected ", parallel_device.num_underlying_devices(), " inputs to TPUReplicatedInput, but got ", inputs.size())); TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str()); return result; } std::vector<TensorHandlePtr> components; components.reserve(inputs.size()); for (int i = 0; i < inputs.size(); ++i) { if (absl::holds_alternative<ParallelTensor*>(inputs[i])) { std::string message(absl::StrCat( "Expected all inputs to TPUReplicatedInput to be non-parallel " "TensorHandles. 
The input ", i, " was a parallel tensor (already " "placed on the parallel device).")); TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str()); return result; } components.emplace_back(TFE_TensorHandleCopySharingTensor( absl::get<TFE_TensorHandle*>(inputs[i]), status)); } std::vector<MaybeParallelTensorOwned> result_content; result_content.reserve(1); result_content.push_back(ParallelTensor::FromTensorHandles( parallel_device, std::move(components), status)); if (TF_GetCode(status) != TF_OK) return result; result.emplace(std::move(result_content)); return result; } else if (operation_name == std::string("TPUReplicatedOutput")) { OpPtr op(TFE_NewOp(context, operation_name, status)); TFE_OpAddAttrs(op.get(), attributes); int expected_outputs = TFE_OpGetOutputLength(op.get(), "outputs", status); if (TF_GetCode(status) != TF_OK) return result; if (expected_outputs != parallel_device.num_underlying_devices()) { std::string message(absl::StrCat( "The parallel device ", parallel_device_name, " expected ", parallel_device.num_underlying_devices(), " outputs for TPUReplicatedOutput, but got ", expected_outputs)); TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str()); return result; } if (absl::holds_alternative<TFE_TensorHandle*>(inputs[0])) { TF_SetStatus(status, TF_INVALID_ARGUMENT, "Expected the input to " "TPUReplicatedOutput to be a parallel tensor (placed on the " "parallel device)."); return result; } ParallelTensor* t = absl::get<ParallelTensor*>(inputs[0]); std::vector<MaybeParallelTensorOwned> outputs; outputs.reserve(t->num_tensors()); for (int i = 0; i < t->num_tensors(); ++i) { TensorHandlePtr this_output( TFE_TensorHandleCopySharingTensor(t->tensor(i), status)); outputs.emplace_back(std::move(this_output)); if (TF_GetCode(status) != TF_OK) return result; } result.emplace(std::move(outputs)); return result; } std::vector<ParallelTensor*> parallel_inputs; std::vector<std::unique_ptr<ParallelTensor>> implicitly_broadcast_tensors; parallel_inputs.reserve(inputs.size()); implicitly_broadcast_tensors.reserve(inputs.size()); for (const auto& input : inputs) { if (absl::holds_alternative<TFE_TensorHandle*>(input)) { if (operation_name == std::string("_EagerConst")) { std::unique_ptr<ParallelTensor> parallel_tensor( parallel_device.CopyToParallelDevice( context, absl::get<TFE_TensorHandle*>(input), status)); if (TF_GetCode(status) != TF_OK) return absl::nullopt; parallel_inputs.push_back(parallel_tensor.get()); implicitly_broadcast_tensors.emplace_back(std::move(parallel_tensor)); } else { TF_SetStatus( status, TF_INVALID_ARGUMENT, absl::StrCat( "Got a non-parallel tensor ", tensorflow::unwrap(absl::get<TFE_TensorHandle*>(input)) ->DebugString(), " as input to a parallel operation. 
First pack non-parallel " "tensors for each device into a parallel tensor explicitly.") .c_str()); return absl::nullopt; } } else { parallel_inputs.push_back(absl::get<ParallelTensor*>(input)); } } absl::optional<std::vector<std::unique_ptr<ParallelTensor>>> maybe_parallel_results( parallel_device.Execute(context, parallel_inputs, operation_name, attributes, expected_max_outputs, status)); if (!maybe_parallel_results.has_value()) return result; std::vector<std::unique_ptr<ParallelTensor>> parallel_results( std::move(maybe_parallel_results.value())); std::vector<MaybeParallelTensorOwned> result_content; result_content.reserve(parallel_results.size()); for (std::unique_ptr<ParallelTensor>& parallel_result : parallel_results) { result_content.push_back( MaybeParallelTensorOwned(std::move(parallel_result))); } result.emplace(std::move(result_content)); return result; } void ParallelTensorDeallocator(void* data) { delete reinterpret_cast<ParallelTensor*>(data); } int ParallelTensorNumDims(void* data, TF_Status* status) { const std::vector<int64_t>* shape; Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return -1; } return shape->size(); } int64_t ParallelTensorDim(void* data, int dim_index, TF_Status* status) { const std::vector<int64_t>* shape; Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return -1; } return (*shape)[dim_index]; } TF_Buffer* ParallelTensorSummarize(void* data, TF_Status* status) { ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(data); std::string summary; Status cpp_status = parallel_tensor->SummarizeValue(summary); if (!cpp_status.ok()) { tsl::Set_TF_Status_from_Status(status, cpp_status); return nullptr; } return TF_NewBufferFromString(summary.data(), summary.size()); } TensorHandlePtr ParallelTensorToTensorHandle( const std::string& parallel_device_name, TFE_Context* context, std::unique_ptr<ParallelTensor> t, TF_Status* status) { ParallelTensor* t_released = t.release(); TFE_CustomDeviceTensorHandleMethods handle_methods; handle_methods.num_dims = &ParallelTensorNumDims; handle_methods.dim = &ParallelTensorDim; handle_methods.deallocator = &ParallelTensorDeallocator; handle_methods.summarize = &ParallelTensorSummarize; return TensorHandlePtr(TFE_NewCustomDeviceTensorHandle( context, parallel_device_name.c_str(), t_released->dtype(), t_released, handle_methods, status)); } TFE_TensorHandle* CopyToParallelDevice(TFE_Context* context, TFE_TensorHandle* tensor, TF_Status* status, void* device_info) { TF_SetStatus( status, TF_UNIMPLEMENTED, absl::StrCat("Trying to copy a tensor ", tensorflow::unwrap(tensor)->DebugString(), " on to a parallel device. Pack non-parallel " "tensors for each device into a parallel tensor explicitly.") .c_str()); return nullptr; } TFE_TensorHandle* CopyTensorFromParallelDevice(TFE_Context* context, TFE_TensorHandle* tensor, const char* target_device_name, TF_Status* status, void* device_info) { ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>( TFE_TensorHandleDevicePointer(tensor, status)); if (TF_GetCode(status) != TF_OK) return nullptr; if (parallel_tensor->num_tensors() == 1) { return TFE_TensorHandleCopySharingTensor(parallel_tensor->tensor(0), status); } else { TF_SetStatus( status, TF_UNIMPLEMENTED, absl::StrCat( "Trying to copy a tensor out of a parallel device. 
Since there " "are multiple components to parallel tensors, they must be " "unpacked explicitly.\n", tensorflow::unwrap(tensor)->DebugString()) .c_str()); return nullptr; } } void ParallelDeviceExecute(const TFE_Op* original_op, int* num_outputs, TFE_TensorHandle** outputs, TF_Status* status, void* device_info) { const char* requested_placement = TFE_OpGetDevice(original_op, status); if (*requested_placement == '\0') { TF_SetStatus( status, TF_INTERNAL, "Ops must be placed on the parallel device explicitly, or their inputs " "first un-packed. Got an un-placed op with an input placed on the " "parallel device."); return; } TFE_Context* context = TFE_OpGetContext(original_op, status); if (TF_GetCode(status) != TF_OK) return; const char* operation_name = TFE_OpGetName(original_op, status); if (TF_GetCode(status) != TF_OK) return; const TFE_OpAttrs* attributes = TFE_OpGetAttrs(original_op); NamedParallelDevice* named_device = reinterpret_cast<NamedParallelDevice*>(device_info); std::vector<MaybeParallelTensorUnowned> typed_inputs; int num_inputs = TFE_OpGetFlatInputCount(original_op, status); if (TF_GetCode(status) != TF_OK) return; typed_inputs.reserve(num_inputs); for (int i = 0; i < num_inputs; ++i) { TFE_TensorHandle* input = TFE_OpGetFlatInput(original_op, i, status); if (TF_GetCode(status) != TF_OK) return; const char* tensor_handle_device = TFE_TensorHandleDeviceName(input, status); if (TF_GetCode(status) != TF_OK) return; if (named_device->name() == tensor_handle_device) { typed_inputs.emplace_back(reinterpret_cast<ParallelTensor*>( TFE_TensorHandleDevicePointer(input, status))); if (TF_GetCode(status) != TF_OK) return; } else { typed_inputs.emplace_back(input); } } absl::optional<std::vector<MaybeParallelTensorOwned>> maybe_typed_outputs( ExecuteWithSpecialOps(named_device->device(), named_device->name(), context, std::move(typed_inputs), operation_name, attributes, *num_outputs, status)); if (TF_GetCode(status) != TF_OK) return; if (!maybe_typed_outputs.has_value()) { TF_SetStatus(status, TF_INTERNAL, "OK status but no value was returned."); return; } std::vector<MaybeParallelTensorOwned> typed_outputs( std::move(maybe_typed_outputs.value())); if (typed_outputs.size() > *num_outputs) { TF_SetStatus(status, TF_INTERNAL, "The allocated output buffer was too small."); return; } for (int i = 0; i < typed_outputs.size(); ++i) { MaybeParallelTensorOwned typed_output(std::move(typed_outputs[i])); if (absl::holds_alternative<TensorHandlePtr>(typed_output)) { outputs[i] = absl::get<TensorHandlePtr>(typed_output).release(); } else { outputs[i] = ParallelTensorToTensorHandle( named_device->name(), context, std::move(absl::get<std::unique_ptr<ParallelTensor>>( typed_output)), status) .release(); if (TF_GetCode(status) != TF_OK) return; } } *num_outputs = typed_outputs.size(); } void DeleteParallelDevice(void* device_info) { delete reinterpret_cast<NamedParallelDevice*>(device_info); } } void AllocateParallelDevice(const char* device_name, const char* const* underlying_devices, int num_underlying_devices, TFE_CustomDevice* device, void** device_info) { device->copy_tensor_to_device = &CopyToParallelDevice; device->copy_tensor_from_device = &CopyTensorFromParallelDevice; device->delete_device = &DeleteParallelDevice; device->execute = &ParallelDeviceExecute; std::vector<std::string> underlying_devices_vector; underlying_devices_vector.reserve(num_underlying_devices); for (int device_index = 0; device_index < num_underlying_devices; ++device_index) { 
underlying_devices_vector.push_back(underlying_devices[device_index]); } std::unique_ptr<ParallelDevice> parallel_device( new ParallelDevice(underlying_devices_vector)); *device_info = new NamedParallelDevice{device_name, std::move(parallel_device)}; } } }
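AllocateParallelDevice only fills in the TFE_CustomDevice vtable and device_info; callers must still register the device with the eager context. A hedged sketch of that wiring, using the same device names the tests below use:

#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device.h"

void RegisterTwoDeviceParallelDevice(TFE_Context* context, TF_Status* status) {
  const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
  const char* underlying[] = {"/job:localhost/replica:0/task:0/device:CPU:0",
                              "/job:localhost/replica:0/task:0/device:CPU:1"};
  TFE_CustomDevice device;
  void* device_info = nullptr;
  tensorflow::parallel_device::AllocateParallelDevice(
      name, underlying, 2, &device, &device_info);
  TFE_RegisterCustomDevice(context, device, name, device_info, status);
}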
#include "tensorflow/c/eager/parallel_device/parallel_device.h" #include <array> #include "tensorflow/c/c_api.h" #include "tensorflow/c/c_api_experimental.h" #include "tensorflow/c/eager/c_api.h" #include "tensorflow/c/eager/c_api_experimental.h" #include "tensorflow/c/eager/immediate_execution_tensor_handle.h" #include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h" #include "tensorflow/c/eager/tfe_tensorhandle_internal.h" #include "tensorflow/c/tf_status_internal.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace parallel_device { using ::testing::HasSubstr; TEST(PARALLEL_DEVICE, TestBasicCPU) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"); } TEST(PARALLEL_DEVICE, TestBasicCPUAliased) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:0"); } TEST(PARALLEL_DEVICE, TestBasicTPUAliased) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices( TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); bool has_tpu = false; for (int device_index = 0; device_index < TF_DeviceListCount(devices.get()); ++device_index) { std::string device_type = TF_DeviceListType(devices.get(), device_index, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); if (device_type == "TPU") { has_tpu = true; break; } } if (has_tpu) { BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:0"); } } TEST(PARALLEL_DEVICE, TestExplicitCopies) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); 
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; const char* first_device_name = "/job:localhost/replica:0/task:0/device:CPU:0"; const char* second_device_name = "/job:localhost/replica:0/task:0/device:CPU:1"; std::array<const char*, 2> underlying_devices{first_device_name, second_device_name}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr failed_copy_on_result(TFE_TensorHandleCopyToDevice( cpu_value.get(), context.get(), device_name, status.get())); EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED); std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()}; TensorHandlePtr device_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr copy_off(TFE_TensorHandleCopyToDevice( device_value.get(), context.get(), first_device_name, status.get())); EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED); } TEST(PARALLEL_DEVICE, TestDifferentShapes) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 2> underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::vector<float> size_two_value{1., 2.}; std::vector<float> size_three_value{1., 2., 3.}; TensorHandlePtr size_two( VectorFloatTensorHandle(size_two_value, status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr size_three( VectorFloatTensorHandle(size_three_value, status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{size_two.get(), size_three.get()}; TensorHandlePtr combined_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); int num_axes = TFE_TensorHandleNumDims(combined_value.get(), status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); EXPECT_EQ(num_axes, 1); } 
TEST(PARALLEL_DEVICE, TestNestedParallelDevices) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 3), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* first_device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 2> first_underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"}; RegisterParallelDevice(context.get(), first_device_name, first_underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* second_device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:1"; std::array<const char*, 2> second_underlying_devices{ "/job:localhost/replica:0/task:0/device:CUSTOM:0", "/job:localhost/replica:0/task:0/device:CPU:2"}; RegisterParallelDevice(context.get(), second_device_name, second_underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr value_one(FloatTensorHandle(1., status.get())); TensorHandlePtr value_two(FloatTensorHandle(2., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()}; TensorHandlePtr first_combined_value = CreatePerDeviceValues( context.get(), components, first_device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr value_three(FloatTensorHandle(3., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); components[0] = first_combined_value.get(); components[1] = value_three.get(); TensorHandlePtr second_combined_value = CreatePerDeviceValues( context.get(), components, second_device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr negative_one_cpu(FloatTensorHandle(3., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); components[0] = negative_one_cpu.get(); components[1] = negative_one_cpu.get(); TensorHandlePtr first_negative_one = CreatePerDeviceValues( context.get(), components, first_device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); components[0] = first_negative_one.get(); components[1] = negative_one_cpu.get(); TensorHandlePtr second_negative_one = CreatePerDeviceValues( context.get(), components, second_device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr multiply_result( Multiply(context.get(), second_combined_value.get(), second_negative_one.get(), status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TensorHandlePtr, 2> second_components; ExtractPerDeviceValues(context.get(), multiply_result.get(), &second_components, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); ExpectScalarEq<float>(second_components[1].get(), 9.); 
std::string first_device = TFE_TensorHandleBackingDeviceName( second_components[0].get(), status.get()); ASSERT_EQ(second_underlying_devices[0], first_device); std::string second_device = TFE_TensorHandleBackingDeviceName( second_components[1].get(), status.get()); ASSERT_EQ(second_underlying_devices[1], second_device); std::array<TensorHandlePtr, 2> first_components; ExtractPerDeviceValues(context.get(), second_components[0].get(), &first_components, status.get()); ExpectScalarEq<float>(first_components[0].get(), 3.); ExpectScalarEq<float>(first_components[1].get(), 6.); first_device = TFE_TensorHandleBackingDeviceName(first_components[0].get(), status.get()); ASSERT_EQ(first_underlying_devices[0], first_device); second_device = TFE_TensorHandleBackingDeviceName(first_components[1].get(), status.get()); ASSERT_EQ(first_underlying_devices[1], second_device); } TEST(PARALLEL_DEVICE, TestInvalidPacking) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 1> underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0"}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr value_one(FloatTensorHandle(1., status.get())); TensorHandlePtr value_two(FloatTensorHandle(2., status.get())); { ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()}; TensorHandlePtr combined_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT) << TF_Message(status.get()); } { std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()}; TensorHandlePtr combined_value = CreatePerDeviceValues( context.get(), correct_components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TensorHandlePtr, 2> incorrect_components; ExtractPerDeviceValues(context.get(), combined_value.get(), &incorrect_components, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT) << TF_Message(status.get()); } { std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()}; TensorHandlePtr combined_value = CreatePerDeviceValues( context.get(), correct_components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 1> incorrect_components{combined_value.get()}; TensorHandlePtr recombined_value = CreatePerDeviceValues( context.get(), incorrect_components, device_name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT) << TF_Message(status.get()); } { std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context.get(), "TPUReplicatedOutput", status.get()), TFE_DeleteOp); if (TF_GetCode(status.get()) != TF_OK) return; TFE_OpSetAttrInt(op.get(), "num_replicas", 1); TFE_OpAddInput(op.get(), value_one.get(), status.get()); if (TF_GetCode(status.get()) != TF_OK) return; TFE_OpSetDevice(op.get(), device_name, status.get()); if 
(TF_GetCode(status.get()) != TF_OK) return; TFE_TensorHandle* result_handles; int num_retvals = 1; TFE_Execute(op.get(), &result_handles, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT) << TF_Message(status.get()); } } TensorHandlePtr CollectiveSum(TFE_Context* context, TFE_TensorHandle* input, int group_size, TF_Status* status) { std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context, "CollectiveReduce", status), TFE_DeleteOp); if (TF_GetCode(status) != TF_OK) return nullptr; const char* device = TFE_TensorHandleDeviceName(input, status); if (TF_GetCode(status) != TF_OK) return nullptr; TFE_OpSetDevice(op.get(), device, status); if (TF_GetCode(status) != TF_OK) return nullptr; TFE_OpSetAttrType(op.get(), "T", TFE_TensorHandleDataType(input)); TFE_OpSetAttrInt(op.get(), "group_size", group_size); TFE_OpSetAttrInt(op.get(), "group_key", 0); TFE_OpSetAttrInt(op.get(), "instance_key", 0); const std::string merge_op("Add"); TFE_OpSetAttrString(op.get(), "merge_op", merge_op.c_str(), merge_op.length()); const std::string final_op("Id"); TFE_OpSetAttrString(op.get(), "final_op", final_op.c_str(), final_op.length()); TFE_OpSetAttrIntList(op.get(), "subdiv_offsets", nullptr, 0); TFE_OpAddInput(op.get(), input, status); if (TF_GetCode(status) != TF_OK) return nullptr; TFE_TensorHandle* result_handle; int num_retvals = 1; TFE_Execute(op.get(), &result_handle, &num_retvals, status); if (TF_GetCode(status) != TF_OK) return nullptr; return TensorHandlePtr(result_handle); } void TestCollective(bool async) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); TFE_ContextOptionsSetAsync(opts.get(), async); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 2> underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr value_one(FloatTensorHandle(1., status.get())); TensorHandlePtr value_two(FloatTensorHandle(2., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()}; TensorHandlePtr parallel_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr reduced( CollectiveSum(context.get(), parallel_value.get(), 2, status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TensorHandlePtr, 2> result_components; ExtractPerDeviceValues(context.get(), reduced.get(), &result_components, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); ExpectScalarEq<float>(result_components[0].get(), 3.); 
ExpectScalarEq<float>(result_components[1].get(), 3.); } TEST(PARALLEL_DEVICE, TestCollectiveSync) { TestCollective(false); } TEST(PARALLEL_DEVICE, TestCollectiveAsync) { TestCollective(true); } void RegisterCollectiveMulFunction(TFE_Context* context, const char* function_name, int group_size, TF_Status* status) { std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> body(TF_NewGraph(), TF_DeleteGraph); TF_OperationDescription* placeholder_desc = TF_NewOperation(body.get(), "Placeholder", "Placeholder"); TF_SetAttrType(placeholder_desc, "dtype", TF_FLOAT); TF_Operation* placeholder_op = TF_FinishOperation(placeholder_desc, status); if (TF_GetCode(status) != TF_OK) return; TF_Output x{placeholder_op, 0}; TF_OperationDescription* reduce_desc = TF_NewOperation(body.get(), "CollectiveReduce", "CollectiveReduce"); TF_SetAttrType(reduce_desc, "T", TF_FLOAT); TF_SetAttrInt(reduce_desc, "group_size", group_size); TF_SetAttrInt(reduce_desc, "group_key", 0); TF_SetAttrInt(reduce_desc, "instance_key", 0); const std::string merge_op("Mul"); TF_SetAttrString(reduce_desc, "merge_op", merge_op.c_str(), merge_op.length()); const std::string final_op("Id"); TF_SetAttrString(reduce_desc, "final_op", final_op.c_str(), final_op.length()); TF_SetAttrIntList(reduce_desc, "subdiv_offsets", nullptr, 0); TF_AddInput(reduce_desc, x); TF_Operation* reduce_op = TF_FinishOperation(reduce_desc, status); if (TF_GetCode(status) != TF_OK) return; TF_Operation* operations[]{placeholder_op, reduce_op}; TF_Output y{reduce_op, 0}; const char* output_name = "y"; std::unique_ptr<TF_Function, decltype(&TF_DeleteFunction)> function( TF_GraphToFunction( body.get(), function_name, 0, 2, operations, 1, &x, 1, &y, &output_name, nullptr, "", status), TF_DeleteFunction); if (TF_GetCode(status) != TF_OK) return; TFE_ContextAddFunction(context, function.get(), status); } TEST(PARALLEL_DEVICE, TestFunction) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 2> underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* function_name = "test_reduce_mul"; RegisterCollectiveMulFunction(context.get(), function_name, 2, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr value_one(FloatTensorHandle(7., status.get())); TensorHandlePtr value_two(FloatTensorHandle(9., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()}; TensorHandlePtr parallel_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << 
TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context.get(), function_name, status.get()), TFE_DeleteOp); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TFE_OpSetDevice(op.get(), device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TFE_OpAddInput(op.get(), parallel_value.get(), status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TFE_TensorHandle* raw_result_handle; int num_retvals = 1; TFE_Execute(op.get(), &raw_result_handle, &num_retvals, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr reduced(raw_result_handle); std::array<TensorHandlePtr, 2> result_components; ExtractPerDeviceValues(context.get(), reduced.get(), &result_components, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); ExpectScalarEq<float>(result_components[0].get(), 7. * 9.); ExpectScalarEq<float>(result_components[1].get(), 7. * 9.); std::string first_device = TFE_TensorHandleBackingDeviceName( result_components[0].get(), status.get()); ASSERT_EQ(underlying_devices[0], first_device); std::string second_device = TFE_TensorHandleBackingDeviceName( result_components[1].get(), status.get()); ASSERT_EQ(underlying_devices[1], second_device); } TEST(PARALLEL_DEVICE, TestSummaryString) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; std::array<const char*, 2> underlying_devices{ "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"}; RegisterParallelDevice(context.get(), device_name, underlying_devices, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get())); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()}; TensorHandlePtr device_value = CreatePerDeviceValues( context.get(), components, device_name, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); ImmediateExecutionTensorHandle* unwrapped_handle = tensorflow::unwrap(device_value.get()); std::string summarized; TF_ASSERT_OK(unwrapped_handle->SummarizeValue(summarized)); EXPECT_THAT(summarized, HasSubstr("\"CPU:0\": 3")); } } }
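These tests move values on and off the parallel device through the CreatePerDeviceValues/ExtractPerDeviceValues helpers. Judging by the TPUReplicatedOutput unpacking exercised in TestInvalidPacking above, packing presumably goes through the mirror op; a hedged sketch of what such a helper might look like (op and attribute names taken from the TPUReplicatedInput op; treat as illustrative, not the test library's actual implementation):

// Illustrative only: pack two handles onto the parallel device.
TensorHandlePtr PackTwo(TFE_Context* context, const char* device_name,
                        std::array<TFE_TensorHandle*, 2>& components,
                        TF_Status* status) {
  std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
      TFE_NewOp(context, "TPUReplicatedInput", status), TFE_DeleteOp);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_OpSetAttrInt(op.get(), "N", 2);
  TFE_OpAddInputList(op.get(), components.data(), 2, status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_OpSetDevice(op.get(), device_name, status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_TensorHandle* result;
  int num_retvals = 1;
  TFE_Execute(op.get(), &result, &num_retvals, status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  return TensorHandlePtr(result);
}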
void DeleteParallelDevice(void* device_info) { delete reinterpret_cast<NamedParallelDevice*>(device_info); }
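DeleteParallelDevice above is the teardown hook for the opaque device_info pointer handed over at registration. A minimal sketch of the wiring, assuming the TFE_CustomDevice callback table from the eager C API (field and function names are best-effort assumptions; the copy and execute callbacks are elided):

// Illustrative wiring only; not the full registration from the test library.
TFE_CustomDevice custom_device;
custom_device.delete_device = &DeleteParallelDevice;  // invoked at context teardown
// ... also set copy_tensor_to_device, copy_tensor_from_device, execute ...
TFE_RegisterCustomDevice(context, custom_device, device_name,
                         /*device_info=*/new NamedParallelDevice{/*...*/},
                         status);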
TEST(PARALLEL_DEVICE, TestBasicCPU) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config( TF_CreateConfig( false, true, 2), TF_DeleteBuffer); TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length, status.get()); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:1"); } TEST(PARALLEL_DEVICE, TestBasicCPUAliased) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:CPU:0"); } TEST(PARALLEL_DEVICE, TestBasicTPUAliased) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices( TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); bool has_tpu = false; for (int device_index = 0; device_index < TF_DeviceListCount(devices.get()); ++device_index) { std::string device_type = TF_DeviceListType(devices.get(), device_index, status.get()); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); if (device_type == "TPU") { has_tpu = true; break; } } if (has_tpu) { BasicTestsForTwoDevices(context.get(), "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:0"); } }
#include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/hash/hash.h" namespace tensorflow { namespace data { namespace experimental { constexpr const char* const UniqueDatasetOp::kDatasetType; constexpr const char* const UniqueDatasetOp::kInputDataset; constexpr const char* const UniqueDatasetOp::kOutputTypes; constexpr const char* const UniqueDatasetOp::kOutputShapes; class UniqueDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, const DatasetBase* input) : DatasetBase(DatasetContext(ctx)), input_(input) { input_->Ref(); } ~Dataset() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { return std::make_unique<Iterator>( Iterator::Params{this, strings::StrCat(prefix, "::Unique")}); } const DataTypeVector& output_dtypes() const override { return input_->output_dtypes(); } const std::vector<PartialTensorShape>& output_shapes() const override { return input_->output_shapes(); } string DebugString() const override { return strings::StrCat("UniqueDatasetOp::Dataset"); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { return input_->CheckExternalState(); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const typename Iterator::Params& params) : DatasetIterator<Dataset>(params) {} Status Initialize(IteratorContext* ctx) override { return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { mutex_lock l(mu_); bool saw_new_value; do { saw_new_value = false; out_tensors->clear(); TF_RETURN_IF_ERROR( input_impl_->GetNext(ctx, out_tensors, end_of_sequence)); if (*end_of_sequence) { break; } DCHECK_EQ(1, out_tensors->size()); saw_new_value = unique_elements_.insert((*out_tensors)[0]).second; } while (!saw_new_value); return absl::OkStatus(); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeUnknownRatioNode(std::move(args)); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); if (input_impl_) { TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); } else { TF_RETURN_IF_ERROR( writer->WriteScalar(full_name("input_impl_empty"), "")); } TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("unique_elements_size"), unique_elements_.size())); size_t i = 0; for (const Tensor& t : unique_elements_) { TF_RETURN_IF_ERROR(writer->WriteTensor( full_name(strings::StrCat("unique_elements[", i++, "]")), t)); } return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(mu_); if (!reader->Contains(full_name("input_impl_empty"))) { TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); } else { input_impl_.reset(); } int64_t num_unique_elements; 
unique_elements_.clear(); TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("unique_elements_size"), &num_unique_elements)); for (int64_t i = 0; i < num_unique_elements; ++i) { Tensor unique_element; TF_RETURN_IF_ERROR(reader->ReadTensor( ctx->flr(), full_name(strings::StrCat("unique_elements[", i, "]")), &unique_element)); auto insert_result = unique_elements_.insert(unique_element); if (!insert_result.second) { return errors::InvalidArgument( "Checkpoint contained two unique elements with the same " "value."); } } return absl::OkStatus(); } private: struct TensorHash { size_t operator()(const Tensor& t) const { if (t.dtype() == DT_INT32 || t.dtype() == DT_INT64) { return Hash64(t.tensor_data().data(), t.tensor_data().size()); } else { DCHECK_EQ(DT_STRING, t.dtype()); auto flat_t = t.flat<tstring>(); uint64 hash = 0; for (int64_t i = 0; i < t.NumElements(); ++i) { hash = Hash64Combine(hash, Hash64(flat_t(i))); } return static_cast<size_t>(hash); } } }; struct TensorKeyEqual { bool operator()(const Tensor& lhs, const Tensor& rhs) const { if (lhs.shape() != rhs.shape() || lhs.dtype() != rhs.dtype()) { return false; } switch (lhs.dtype()) { #define HANDLE_TYPE(T) \ case T: \ do { \ auto lhs_flat = lhs.flat<EnumToDataType<T>::Type>(); \ auto rhs_flat = rhs.flat<EnumToDataType<T>::Type>(); \ for (int64_t i = 0; i < lhs.NumElements(); ++i) { \ if (lhs_flat(i) != rhs_flat(i)) { \ return false; \ } \ } \ return true; \ } while (0) HANDLE_TYPE(DT_INT32); HANDLE_TYPE(DT_INT64); HANDLE_TYPE(DT_STRING); default: DCHECK(false) << "UniqueDataset unhandled data type: " << DataTypeString(lhs.dtype()); return false; } } }; mutex mu_; std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_); std::unordered_set<Tensor, TensorHash, TensorKeyEqual> unique_elements_ TF_GUARDED_BY(mu_); }; const DatasetBase* const input_; }; void UniqueDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { OP_REQUIRES(ctx, input->output_dtypes().size() == 1, errors::InvalidArgument("UniqueDataset only supports " "inputs with a single component.")); DataType input_dtype = input->output_dtypes()[0]; OP_REQUIRES(ctx, input_dtype == DT_INT32 || input_dtype == DT_INT64 || input_dtype == DT_STRING, errors::InvalidArgument( "UniqueDataset only supports inputs with a single " "`tf.int32`, `tf.int64`, or `tf.string` component.")); *output = new Dataset(ctx, input); } namespace { REGISTER_KERNEL_BUILDER(Name("UniqueDataset").Device(DEVICE_CPU), UniqueDatasetOp); REGISTER_KERNEL_BUILDER(Name("ExperimentalUniqueDataset").Device(DEVICE_CPU), UniqueDatasetOp); } } } }
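GetNextInternal above is a hash-set dedup loop: keep pulling from the upstream iterator until an element inserts freshly into unique_elements_, and surface end_of_sequence as soon as the input runs out. The same control flow over plain ints, as a self-contained sketch (names hypothetical):

#include <optional>
#include <unordered_set>
#include <vector>

// Returns the next not-yet-seen element, advancing pos; std::nullopt at end.
std::optional<int> NextUnique(const std::vector<int>& input, size_t& pos,
                              std::unordered_set<int>& seen) {
  while (pos < input.size()) {
    int value = input[pos++];
    if (seen.insert(value).second) return value;  // first occurrence wins
  }
  return std::nullopt;  // end of sequence
}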
#include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h" #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/platform/env.h" namespace tensorflow { namespace data { namespace experimental { namespace { constexpr char kNodeName[] = "unique_dataset"; class UniqueDatasetParams : public DatasetParams { public: template <typename T> UniqueDatasetParams(T input_dataset_params, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), kNodeName) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { return {}; } Status GetInputNames(std::vector<string>* input_names) const override { input_names->clear(); input_names->emplace_back(UniqueDatasetOp::kInputDataset); return absl::OkStatus(); } Status GetAttributes(AttributeVector* attributes) const override { *attributes = {{"output_types", output_dtypes_}, {"output_shapes", output_shapes_}, {"metadata", ""}}; return absl::OkStatus(); } string dataset_type() const override { return UniqueDatasetOp::kDatasetType; } }; class UniqueDatasetOpTest : public DatasetOpsTestBase {}; UniqueDatasetParams NormalCaseParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{12, 1}, {1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8, 34})}, "tensor_slice_dataset"); return UniqueDatasetParams(tensor_slice_dataset_params, {DT_INT64}, {PartialTensorShape({1})}); } UniqueDatasetParams LastRecordIsDuplicateParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{11, 1}, {1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8})}, "tensor_slice_dataset"); return UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_INT64}, {PartialTensorShape({1})}); } UniqueDatasetParams AllRecordsTheSameParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{5, 1}, {1, 1, 1, 1, 1})}, "tensor_slice_dataset"); return UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_INT64}, {PartialTensorShape({1})}); } UniqueDatasetParams EmptyInputParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{0, 1}, {})}, "tensor_slice_dataset"); return UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_INT64}, {PartialTensorShape({1})}); } UniqueDatasetParams StringParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<tstring>( TensorShape{11, 1}, {"one", "One", "two", "three", "five", "eight", "thirteen", "twenty-one", "eight", "eight", "thirty-four"})}, "tensor_slice_dataset"); return UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_STRING}, {PartialTensorShape({1})}); } UniqueDatasetParams TwoComponentsParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( { CreateTensor<int64_t>(TensorShape{1, 1}, {1}), CreateTensor<int64_t>(TensorShape{1, 1}, {42}), }, "tensor_slice_dataset"); return UniqueDatasetParams( std::move(tensor_slice_dataset_params), {DT_INT64, DT_INT64}, {PartialTensorShape({1}), PartialTensorShape({1})}); } UniqueDatasetParams NoInputParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {}, "tensor_slice_dataset"); return 
UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_INT64}, {PartialTensorShape({})}); } UniqueDatasetParams FP32Params() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<float>(TensorShape{1, 1}, {3.14})}, "tensor_slice_dataset"); return UniqueDatasetParams(std::move(tensor_slice_dataset_params), {DT_FLOAT}, {PartialTensorShape({1})}); } std::vector<GetNextTestCase<UniqueDatasetParams>> GetNextTestCases() { return {{NormalCaseParams(), CreateTensors<int64_t>(TensorShape({1}), {{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})}, {LastRecordIsDuplicateParams(), CreateTensors<int64_t>(TensorShape({1}), {{1}, {2}, {3}, {5}, {8}, {13}, {21}})}, {AllRecordsTheSameParams(), CreateTensors<int64_t>(TensorShape({1}), {{1}})}, {EmptyInputParams(), CreateTensors<int64_t>(TensorShape({1}), {})}, {StringParams(), CreateTensors<tstring>(TensorShape({1}), {{"one"}, {"One"}, {"two"}, {"three"}, {"five"}, {"eight"}, {"thirteen"}, {"twenty-one"}, {"thirty-four"}})}}; } ITERATOR_GET_NEXT_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams, GetNextTestCases()) TEST_F(UniqueDatasetOpTest, DatasetNodeName) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(UniqueDatasetOpTest, DatasetTypeString) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(UniqueDatasetOp::kDatasetType))); } TEST_F(UniqueDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64})); } TEST_F(UniqueDatasetOpTest, DatasetOutputShapes) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})})); } std::vector<CardinalityTestCase<UniqueDatasetParams>> CardinalityTestCases() { return {{NormalCaseParams(), kUnknownCardinality}, {EmptyInputParams(), kUnknownCardinality}}; } DATASET_CARDINALITY_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams, CardinalityTestCases()) std::vector<IteratorOutputDtypesTestCase<UniqueDatasetParams>> IteratorOutputDtypesTestCases() { return {{NormalCaseParams(), {DT_INT64}}, {StringParams(), {DT_STRING}}}; } ITERATOR_OUTPUT_DTYPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams, IteratorOutputDtypesTestCases()) std::vector<IteratorOutputShapesTestCase<UniqueDatasetParams>> IteratorOutputShapesTestCases() { return {{NormalCaseParams(), {PartialTensorShape({1})}}, {StringParams(), {PartialTensorShape({1})}}}; } ITERATOR_OUTPUT_SHAPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams, IteratorOutputShapesTestCases()) TEST_F(UniqueDatasetOpTest, IteratorPrefix) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( UniqueDatasetOp::kDatasetType, dataset_params.iterator_prefix()))); } std::vector<IteratorSaveAndRestoreTestCase<UniqueDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{NormalCaseParams(), {0, 2, 6, 8}, CreateTensors<int64_t>(TensorShape({1}), {{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})}, {LastRecordIsDuplicateParams(), {0, 2, 6, 8}, CreateTensors<int64_t>(TensorShape({1}), {{1}, {2}, {3}, {5}, {8}, {13}, {21}})}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams, IteratorSaveAndRestoreTestCases()) class ParameterizedInvalidInputTest : public 
UniqueDatasetOpTest, public ::testing::WithParamInterface<UniqueDatasetParams> {}; TEST_P(ParameterizedInvalidInputTest, InvalidInput) { auto dataset_params = GetParam(); auto result = Initialize(dataset_params); EXPECT_FALSE(result.ok()); } INSTANTIATE_TEST_SUITE_P(UniqueDatasetOpTest, ParameterizedInvalidInputTest, ::testing::ValuesIn({TwoComponentsParams(), NoInputParams(), FP32Params()})); } } } }
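The save/restore cases above exercise SaveInternal/RestoreInternal, which checkpoint the seen-set as a size scalar plus one tensor per element under an indexed key, rejecting checkpoints that contain duplicates. The same pattern over plain ints, with a hypothetical string-keyed store standing in for the writer/reader:

#include <map>
#include <set>
#include <stdexcept>
#include <string>

using Checkpoint = std::map<std::string, int>;

void SaveSeen(const std::set<int>& seen, Checkpoint& ckpt) {
  ckpt["unique_elements_size"] = static_cast<int>(seen.size());
  int i = 0;
  for (int v : seen)
    ckpt["unique_elements[" + std::to_string(i++) + "]"] = v;
}

std::set<int> RestoreSeen(const Checkpoint& ckpt) {
  std::set<int> seen;
  const int n = ckpt.at("unique_elements_size");
  for (int i = 0; i < n; ++i) {
    bool inserted =
        seen.insert(ckpt.at("unique_elements[" + std::to_string(i) + "]")).second;
    if (!inserted) throw std::invalid_argument("duplicate element in checkpoint");
  }
  return seen;
}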
const std::vector<PartialTensorShape>& output_shapes() const override { return input_->output_shapes(); }
TEST_F(UniqueDatasetOpTest, DatasetOutputShapes) { auto dataset_params = NormalCaseParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})})); }
#include "tensorstore/internal/nditerable_elementwise_output_transform.h" #include <array> #include <utility> #include "absl/status/status.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_buffer_management.h" #include "tensorstore/internal/nditerable_util.h" #include "tensorstore/internal/unique_with_intrusive_allocator.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" namespace tensorstore { namespace internal { namespace { struct ElementwiseOutputTransformNDIterator : public NDIterator::Base<ElementwiseOutputTransformNDIterator> { explicit ElementwiseOutputTransformNDIterator( const NDIterable* output, ElementwiseClosure<2, void*> closure, NDIterable::IterationBufferKindLayoutView layout, ArenaAllocator<> allocator) : output_(span(&output, 1), layout, allocator), context_(closure.context), elementwise_function_((*closure.function)[layout.buffer_kind]) {} ArenaAllocator<> get_allocator() const override { return output_.get_allocator(); } bool UpdateBlock(span<const Index> indices, IterationBufferShape block_shape, IterationBufferPointer pointer, absl::Status* status) override { return output_.GetBlock(indices, block_shape, status) && elementwise_function_(context_, block_shape, pointer, output_.block_pointers()[0], status) && output_.UpdateBlock(indices, block_shape, status); } NDIteratorsWithManagedBuffers<1> output_; void* context_; SpecializedElementwiseFunctionPointer<2, void*> elementwise_function_; }; struct ElementwiseOutputTransformNDIterable : public NDIterablesWithManagedBuffers< std::array<NDIterable::Ptr, 1>, NDIterable::Base<ElementwiseOutputTransformNDIterable>> { using Base = NDIterablesWithManagedBuffers< std::array<NDIterable::Ptr, 1>, NDIterable::Base<ElementwiseOutputTransformNDIterable>>; ElementwiseOutputTransformNDIterable(NDIterable::Ptr output, DataType input_dtype, ElementwiseClosure<2, void*> closure, ArenaAllocator<> allocator) : Base{{{std::move(output)}}}, input_dtype_(input_dtype), closure_(closure), allocator_(allocator) {} ArenaAllocator<> get_allocator() const override { return allocator_; } DataType dtype() const override { return input_dtype_; } NDIterator::Ptr GetIterator( NDIterable::IterationBufferKindLayoutView layout) const override { return MakeUniqueWithVirtualIntrusiveAllocator< ElementwiseOutputTransformNDIterator>( allocator_, this->iterables[0].get(), closure_, layout); } DataType input_dtype_; ElementwiseClosure<2, void*> closure_; ArenaAllocator<> allocator_; }; } NDIterable::Ptr GetElementwiseOutputTransformNDIterable( NDIterable::Ptr output, DataType input_dtype, ElementwiseClosure<2, void*> closure, Arena* arena) { return MakeUniqueWithVirtualIntrusiveAllocator< ElementwiseOutputTransformNDIterable>( ArenaAllocator<>(arena), std::move(output), input_dtype, closure); } } }
#include "tensorstore/internal/nditerable_elementwise_output_transform.h" #include <new> #include <tuple> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorstore/array.h" #include "tensorstore/contiguous_layout.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/nditerable_copy.h" #include "tensorstore/internal/nditerable_transformed_array.h" #include "tensorstore/internal/nditerable_util.h" #include "tensorstore/util/iterate.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::Index; using ::tensorstore::internal::NDIterableCopier; using ::testing::_; using ::testing::Pair; template <typename Func, typename SourceArray, typename DestArray> absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints, SourceArray source_array, DestArray dest_array) { tensorstore::internal::Arena arena; tensorstore::internal::ElementwiseClosure<2, void*> closure = tensorstore::internal::SimpleElementwiseFunction< Func(typename SourceArray::Element, typename DestArray::Element), void*>::Closure(&func); auto iterable = tensorstore::internal::GetElementwiseOutputTransformNDIterable( tensorstore::internal::GetTransformedArrayNDIterable(dest_array, &arena) .value(), tensorstore::dtype_v<typename SourceArray::Element>, closure, &arena); return tensorstore::internal::NDIterableCopier( *tensorstore::internal::GetTransformedArrayNDIterable(source_array, &arena) .value(), *iterable, dest_array.shape(), constraints, &arena) .Copy(); } TEST(NDIterableElementwiseOutputTransformTest, Basic) { auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); auto dest = tensorstore::AllocateArray<double>(source.shape()); TENSORSTORE_EXPECT_OK(TestCopy( [](const int* source, double* dest, void* status) { *dest = -*source; }, {}, source, dest)); EXPECT_EQ( tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}), dest); } TEST(NDIterableElementwiseOutputTransformTest, PartialCopy) { auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6}); auto dest = tensorstore::AllocateArray<double>( source.shape(), tensorstore::c_order, tensorstore::value_init); EXPECT_THAT(TestCopy( [](const int* source, double* dest, void* arg) { auto* status = static_cast<absl::Status*>(arg); if (*source == 0) { *status = absl::UnknownError("zero"); return false; } *dest = -*source; return true; }, tensorstore::c_order, source, dest), absl::UnknownError("zero")); EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}), dest); } }
NDIterable::Ptr GetElementwiseOutputTransformNDIterable( NDIterable::Ptr output, DataType input_dtype, ElementwiseClosure<2, void*> closure, Arena* arena) { return MakeUniqueWithVirtualIntrusiveAllocator< ElementwiseOutputTransformNDIterable>( ArenaAllocator<>(arena), std::move(output), input_dtype, closure); }
TEST(NDIterableElementwiseOutputTransformTest, Basic) { auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); auto dest = tensorstore::AllocateArray<double>(source.shape()); TENSORSTORE_EXPECT_OK(TestCopy( [](const int* source, double* dest, void* status) { *dest = -*source; }, {}, source, dest)); EXPECT_EQ( tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}), dest); } TEST(NDIterableElementwiseOutputTransformTest, PartialCopy) { auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6}); auto dest = tensorstore::AllocateArray<double>( source.shape(), tensorstore::c_order, tensorstore::value_init); EXPECT_THAT(TestCopy( [](const int* source, double* dest, void* arg) { auto* status = static_cast<absl::Status*>(arg); if (*source == 0) { *status = absl::UnknownError("zero"); return false; } *dest = -*source; return true; }, tensorstore::c_order, source, dest), absl::UnknownError("zero")); EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}), dest); }
#include "tensorstore/internal/nditerable_copy.h" #include <algorithm> #include <array> #include <cassert> #include <memory> #include <utility> #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/element_copy_function.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_buffer_management.h" #include "tensorstore/internal/nditerable_util.h" #include "tensorstore/util/iterate.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" namespace tensorstore { namespace internal { NDIterableCopyManager::NDIterableCopyManager(const NDIterable* input, const NDIterable* output) : Base{{{input, output}}} { assert(input->dtype() == output->dtype()); } NDIterableCopyManager::BufferParameters NDIterableCopyManager::GetBufferParameters( NDIterable::IterationLayoutView layout) const { BufferParameters result; auto input_constraint = input()->GetIterationBufferConstraint(layout); auto output_constraint = output()->GetIterationBufferConstraint(layout); if (!input_constraint.external || !output_constraint.external) { result.input_buffer_kind = result.output_buffer_kind = std::max( input_constraint.min_buffer_kind, output_constraint.min_buffer_kind); } else { result.input_buffer_kind = input_constraint.min_buffer_kind; result.output_buffer_kind = output_constraint.min_buffer_kind; } result.buffer_source = input_constraint.external ? (output_constraint.external ? BufferSource::kExternal : BufferSource::kOutput) : (output_constraint.external ? BufferSource::kInput : BufferSource::kBoth); return result; } std::ptrdiff_t NDIterableCopyManager::GetWorkingMemoryBytesPerElement( NDIterable::IterationLayoutView layout) const { auto buffer_parameters = GetBufferParameters(layout); std::ptrdiff_t num_bytes = 0; num_bytes += input()->GetWorkingMemoryBytesPerElement( layout, buffer_parameters.input_buffer_kind); num_bytes += output()->GetWorkingMemoryBytesPerElement( layout, buffer_parameters.output_buffer_kind); if (buffer_parameters.buffer_source == BufferSource::kExternal) { num_bytes += input()->dtype()->size; if (std::max(buffer_parameters.input_buffer_kind, buffer_parameters.output_buffer_kind) == IterationBufferKind::kIndexed) { num_bytes += sizeof(Index); } } return num_bytes; } NDIteratorCopyManager::NDIteratorCopyManager( const NDIterableCopyManager& iterable, NDIterable::IterationBufferLayoutView layout, ArenaAllocator<> allocator) : buffer_manager_(allocator) { auto buffer_parameters = iterable.GetBufferParameters(layout); input_ = iterable.input()->GetIterator( {layout, buffer_parameters.input_buffer_kind}); output_ = iterable.output()->GetIterator( {layout, buffer_parameters.output_buffer_kind}); switch (buffer_parameters.buffer_source) { case NDIterableCopyManager::BufferSource::kBoth: copy_elements_function_ = iterable.input() ->dtype() ->copy_assign[buffer_parameters.input_buffer_kind]; break; case NDIterableCopyManager::BufferSource::kExternal: buffer_manager_.Initialize(layout.block_shape, {{iterable.input()->dtype()}}, {{{{buffer_parameters.input_buffer_kind, buffer_parameters.output_buffer_kind}}}}); break; default: break; } constexpr static CopyImpl kCopyImpls[] = { [](NDIteratorCopyManager* self, span<const Index> indices, IterationBufferShape block_shape, absl::Status* status) -> bool { IterationBufferPointer input_pointer, 
output_pointer; return self->input_->GetBlock(indices, block_shape, &input_pointer, status) && self->output_->GetBlock(indices, block_shape, &output_pointer, status) && self->copy_elements_function_(nullptr, block_shape, input_pointer, output_pointer, status) && self->output_->UpdateBlock(indices, block_shape, output_pointer, status); }, [](NDIteratorCopyManager* self, span<const Index> indices, IterationBufferShape block_shape, absl::Status* status) -> bool { IterationBufferPointer pointer; return self->input_->GetBlock(indices, block_shape, &pointer, status) && self->output_->GetBlock(indices, block_shape, &pointer, status) && self->output_->UpdateBlock(indices, block_shape, pointer, status); }, [](NDIteratorCopyManager* self, span<const Index> indices, IterationBufferShape block_shape, absl::Status* status) -> bool { IterationBufferPointer pointer; return self->output_->GetBlock(indices, block_shape, &pointer, status) && self->input_->GetBlock(indices, block_shape, &pointer, status) && self->output_->UpdateBlock(indices, block_shape, pointer, status); }, [](NDIteratorCopyManager* self, span<const Index> indices, IterationBufferShape block_shape, absl::Status* status) -> bool { return self->input_->GetBlock( indices, block_shape, &self->buffer_manager_.buffer_pointers()[0][0], status) && self->output_->GetBlock( indices, block_shape, &self->buffer_manager_.buffer_pointers()[1][0], status) && self->output_->UpdateBlock( indices, block_shape, self->buffer_manager_.buffer_pointers()[1][0], status); }, }; copy_impl_ = kCopyImpls[static_cast<int>(buffer_parameters.buffer_source)]; } NDIterableCopier::NDIterableCopier(const NDIterable& input, const NDIterable& output, span<const Index> shape, IterationConstraints constraints, Arena* arena) : NDIterableCopier(NDIterableCopyManager(&input, &output), shape, constraints, arena) {} NDIterableCopier::NDIterableCopier( const NDIterableCopyManager& iterable_copy_manager, span<const Index> shape, IterationConstraints constraints, Arena* arena) : layout_info_(iterable_copy_manager, shape, constraints), block_shape_(GetNDIterationBlockShape( iterable_copy_manager.GetWorkingMemoryBytesPerElement( layout_info_.layout_view()), layout_info_.iteration_shape)), iterator_copy_manager_(iterable_copy_manager, {layout_info_.layout_view(), block_shape_}, arena) {} absl::Status NDIterableCopier::Copy() { span<const Index> iteration_shape = layout_info_.iteration_shape; std::fill_n(position_, iteration_shape.size(), static_cast<Index>(0)); if (layout_info_.empty) { return absl::OkStatus(); } absl::Status copy_status; if (Index inner_block_size = block_shape_[1]; inner_block_size != iteration_shape.back()) { assert(block_shape_[0] == 1); for (Index block_size = inner_block_size; block_size;) { if (!iterator_copy_manager_.Copy( span<const Index>(position_, iteration_shape.size()), {1, block_size}, &copy_status)) { return GetElementCopyErrorStatus(std::move(copy_status)); } block_size = StepBufferPositionForward(iteration_shape, block_size, inner_block_size, position_); } } else { const Index outer_block_size = block_shape_[0]; for (Index block_size = outer_block_size; block_size;) { if (!iterator_copy_manager_.Copy( span<const Index>(position_, iteration_shape.size()), {block_size, inner_block_size}, &copy_status)) { return GetElementCopyErrorStatus(std::move(copy_status)); } block_size = StepBufferPositionForward( iteration_shape.first(iteration_shape.size() - 1), block_size, outer_block_size, position_); } } return absl::OkStatus(); } } }
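GetBufferParameters above encodes a small decision table: if either side cannot work with an external buffer, both are forced onto a common (maximal) buffer kind, and the buffer source records which side, if either, can hand over block memory directly. A compact restatement of just that selection (enum values as named in the class; declaration order assumed):

enum class BufferSource { kBoth, kInput, kOutput, kExternal };

// Mirrors NDIterableCopyManager::GetBufferParameters' buffer_source choice.
BufferSource PickBufferSource(bool input_external, bool output_external) {
  if (input_external) {
    return output_external ? BufferSource::kExternal : BufferSource::kOutput;
  }
  return output_external ? BufferSource::kInput : BufferSource::kBoth;
}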
#include "tensorstore/internal/nditerable_copy.h" #include <memory> #include <new> #include <string> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_cat.h" #include "tensorstore/array.h" #include "tensorstore/contiguous_layout.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/memory.h" #include "tensorstore/internal/meta.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_elementwise_input_transform.h" #include "tensorstore/internal/nditerable_elementwise_output_transform.h" #include "tensorstore/internal/nditerable_transformed_array.h" #include "tensorstore/internal/nditerable_util.h" #include "tensorstore/rank.h" #include "tensorstore/util/iterate.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::dtype_v; using ::tensorstore::Index; using ::tensorstore::MakeArray; using ::tensorstore::Shared; using ::tensorstore::internal::GetElementwiseInputTransformNDIterable; using ::tensorstore::internal::GetElementwiseOutputTransformNDIterable; using ::tensorstore::internal::GetTransformedArrayNDIterable; TEST(NDIterableCopyTest, Example) { auto source_array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); auto dest_array = tensorstore::AllocateArray<int>( {2, 3}, tensorstore::c_order, tensorstore::value_init); auto dest_element_transform = [](const int* source, int* dest, void* arg) { auto* status = static_cast<absl::Status*>(arg); if (*source == 5) { *status = absl::UnknownError("5"); return false; } *dest = *source; return true; }; tensorstore::internal::ElementwiseClosure<2, void*> dest_closure = tensorstore::internal::SimpleElementwiseFunction< decltype(dest_element_transform)(const int, int), void*>::Closure(&dest_element_transform); tensorstore::internal::Arena arena; auto source_iterable = GetTransformedArrayNDIterable(source_array, &arena).value(); auto dest_iterable = GetElementwiseOutputTransformNDIterable( GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<int>, dest_closure, &arena); tensorstore::internal::NDIterableCopier copier( *source_iterable, *dest_iterable, dest_array.shape(), tensorstore::c_order, &arena); EXPECT_EQ(absl::UnknownError("5"), copier.Copy()); EXPECT_EQ(MakeArray<int>({{1, 2, 3}, {4, 0, 0}}), dest_array); } template <typename IntermediateElement, typename SourceArray, typename SourceElementTransform, typename DestElementTransform, typename DestArray> absl::Status TestCopy(tensorstore::IterationConstraints constraints, SourceArray source_array, SourceElementTransform source_element_transform, DestElementTransform dest_element_transform, DestArray dest_array) { tensorstore::internal::Arena arena; tensorstore::internal::ElementwiseClosure<2, void*> source_closure = tensorstore::internal::SimpleElementwiseFunction< SourceElementTransform(typename SourceArray::Element, IntermediateElement), void*>::Closure(&source_element_transform); tensorstore::internal::ElementwiseClosure<2, void*> dest_closure = tensorstore::internal::SimpleElementwiseFunction< DestElementTransform(IntermediateElement, typename DestArray::Element), void*>::Closure(&dest_element_transform); auto source_iterable = 
GetElementwiseInputTransformNDIterable( {{GetTransformedArrayNDIterable(source_array, &arena).value()}}, dtype_v<IntermediateElement>, source_closure, &arena); auto dest_iterable = GetElementwiseOutputTransformNDIterable( GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<IntermediateElement>, dest_closure, &arena); return tensorstore::internal::NDIterableCopier( *source_iterable, *dest_iterable, dest_array.shape(), constraints, &arena) .Copy(); } TEST(NDIterableCopyTest, ExternalBuffer) { for (const bool indexed_source : {false, true}) { for (const bool indexed_dest : {false, true}) { SCOPED_TRACE(absl::StrCat("indexed_source=", indexed_source, ", indexed_dest=", indexed_dest) .c_str()); auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); tensorstore::TransformedArray<Shared<const int>> tsource = source; if (indexed_source) { tsource = (source | tensorstore::Dims(0, 1).OuterIndexArraySlice( MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2}))) .value(); } auto dest = tensorstore::AllocateArray<double>(source.shape()); tensorstore::TransformedArray<Shared<double>> tdest = dest; if (indexed_dest) { tdest = (dest | tensorstore::Dims(0, 1).OuterIndexArraySlice( MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2}))) .value(); } EXPECT_EQ(absl::OkStatus(), (TestCopy<unsigned int>( {}, tsource, [](const int* source, unsigned int* dest, void* status) { *dest = *source * 2; }, [](const unsigned int* source, double* dest, void* status) { *dest = *source + 100.0; }, tdest))); EXPECT_EQ(tensorstore::MakeArray<double>( {{102.0, 104.0, 106.0}, {108.0, 110.0, 112.0}}), dest); } } } class MaybeUnitBlockSizeTest : public ::testing::TestWithParam<bool> { public: MaybeUnitBlockSizeTest() { #ifndef NDEBUG tensorstore::internal::SetNDIterableTestUnitBlockSize(GetParam()); #endif } ~MaybeUnitBlockSizeTest() { #ifndef NDEBUG tensorstore::internal::SetNDIterableTestUnitBlockSize(false); #endif } }; INSTANTIATE_TEST_SUITE_P(NormalBlockSize, MaybeUnitBlockSizeTest, ::testing::Values(false)); #ifndef NDEBUG INSTANTIATE_TEST_SUITE_P(UnitBlockSize, MaybeUnitBlockSizeTest, ::testing::Values(true)); #endif TEST_P(MaybeUnitBlockSizeTest, InnerIndexArray) { constexpr size_t length = 5000; auto source = tensorstore::AllocateArray<int>({length}); auto dest = tensorstore::AllocateArray<int>({length}); auto expected = tensorstore::AllocateArray<int>({length}); auto indices = tensorstore::AllocateArray<int64_t>({length}); for (int i = 0; i < length; ++i) { source(i) = -i; dest(i) = 42; indices(i) = length - 1 - i; expected(i) = -(length - 1 - i); } TENSORSTORE_ASSERT_OK_AND_ASSIGN( tensorstore::TransformedArray<Shared<const int>> tsource, source | tensorstore::Dims(0).IndexArraySlice(indices)); tensorstore::TransformedArray<Shared<int>> tdest = dest; tensorstore::internal::Arena arena; TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto source_iterable, GetTransformedArrayNDIterable(tsource, &arena)); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto dest_iterable, GetTransformedArrayNDIterable(tdest, &arena)); TENSORSTORE_ASSERT_OK(tensorstore::internal::NDIterableCopier( *source_iterable, *dest_iterable, dest.shape(), {}, &arena) .Copy()); EXPECT_EQ(expected, dest); } }
absl::Status NDIterableCopier::Copy() { span<const Index> iteration_shape = layout_info_.iteration_shape; std::fill_n(position_, iteration_shape.size(), static_cast<Index>(0)); if (layout_info_.empty) { return absl::OkStatus(); } absl::Status copy_status; if (Index inner_block_size = block_shape_[1]; inner_block_size != iteration_shape.back()) { assert(block_shape_[0] == 1); for (Index block_size = inner_block_size; block_size;) { if (!iterator_copy_manager_.Copy( span<const Index>(position_, iteration_shape.size()), {1, block_size}, &copy_status)) { return GetElementCopyErrorStatus(std::move(copy_status)); } block_size = StepBufferPositionForward(iteration_shape, block_size, inner_block_size, position_); } } else { const Index outer_block_size = block_shape_[0]; for (Index block_size = outer_block_size; block_size;) { if (!iterator_copy_manager_.Copy( span<const Index>(position_, iteration_shape.size()), {block_size, inner_block_size}, &copy_status)) { return GetElementCopyErrorStatus(std::move(copy_status)); } block_size = StepBufferPositionForward( iteration_shape.first(iteration_shape.size() - 1), block_size, outer_block_size, position_); } } return absl::OkStatus(); }
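Copy() above advances through the iteration space odometer-style: StepBufferPositionForward bumps the position by the block just processed, carries into earlier dimensions, and returns the size of the next block (zero once exhausted). A self-contained analogue of that stepping logic, simplified and hypothetical rather than the real helper:

#include <algorithm>
#include <cstdint>
#include <vector>

// Advance pos by `step` along the last axis of shape, carrying leftward;
// returns the extent of the next block (<= max_block), or 0 when done.
int64_t StepForward(const std::vector<int64_t>& shape, int64_t step,
                    int64_t max_block, std::vector<int64_t>& pos) {
  const size_t last = shape.size() - 1;
  pos[last] += step;
  if (pos[last] < shape[last]) {
    return std::min(max_block, shape[last] - pos[last]);
  }
  pos[last] = 0;
  for (size_t i = last; i-- > 0;) {  // carry into earlier dimensions
    if (++pos[i] < shape[i]) return std::min(max_block, shape[last]);
    pos[i] = 0;
  }
  return 0;  // iteration complete
}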
TEST(NDIterableCopyTest, Example) { auto source_array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); auto dest_array = tensorstore::AllocateArray<int>( {2, 3}, tensorstore::c_order, tensorstore::value_init); auto dest_element_transform = [](const int* source, int* dest, void* arg) { auto* status = static_cast<absl::Status*>(arg); if (*source == 5) { *status = absl::UnknownError("5"); return false; } *dest = *source; return true; }; tensorstore::internal::ElementwiseClosure<2, void*> dest_closure = tensorstore::internal::SimpleElementwiseFunction< decltype(dest_element_transform)(const int, int), void*>::Closure(&dest_element_transform); tensorstore::internal::Arena arena; auto source_iterable = GetTransformedArrayNDIterable(source_array, &arena).value(); auto dest_iterable = GetElementwiseOutputTransformNDIterable( GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<int>, dest_closure, &arena); tensorstore::internal::NDIterableCopier copier( *source_iterable, *dest_iterable, dest_array.shape(), tensorstore::c_order, &arena); EXPECT_EQ(absl::UnknownError("5"), copier.Copy()); EXPECT_EQ(MakeArray<int>({{1, 2, 3}, {4, 0, 0}}), dest_array); } TEST(NDIterableCopyTest, ExternalBuffer) { for (const bool indexed_source : {false, true}) { for (const bool indexed_dest : {false, true}) { SCOPED_TRACE(absl::StrCat("indexed_source=", indexed_source, ", indexed_dest=", indexed_dest) .c_str()); auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}}); tensorstore::TransformedArray<Shared<const int>> tsource = source; if (indexed_source) { tsource = (source | tensorstore::Dims(0, 1).OuterIndexArraySlice( MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2}))) .value(); } auto dest = tensorstore::AllocateArray<double>(source.shape()); tensorstore::TransformedArray<Shared<double>> tdest = dest; if (indexed_dest) { tdest = (dest | tensorstore::Dims(0, 1).OuterIndexArraySlice( MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2}))) .value(); } EXPECT_EQ(absl::OkStatus(), (TestCopy<unsigned int>( {}, tsource, [](const int* source, unsigned int* dest, void* status) { *dest = *source * 2; }, [](const unsigned int* source, double* dest, void* status) { *dest = *source + 100.0; }, tdest))); EXPECT_EQ(tensorstore::MakeArray<double>( {{102.0, 104.0, 106.0}, {108.0, 110.0, 112.0}}), dest); } } } TEST_P(MaybeUnitBlockSizeTest, InnerIndexArray) { constexpr size_t length = 5000; auto source = tensorstore::AllocateArray<int>({length}); auto dest = tensorstore::AllocateArray<int>({length}); auto expected = tensorstore::AllocateArray<int>({length}); auto indices = tensorstore::AllocateArray<int64_t>({length}); for (int i = 0; i < length; ++i) { source(i) = -i; dest(i) = 42; indices(i) = length - 1 - i; expected(i) = -(length - 1 - i); } TENSORSTORE_ASSERT_OK_AND_ASSIGN( tensorstore::TransformedArray<Shared<const int>> tsource, source | tensorstore::Dims(0).IndexArraySlice(indices)); tensorstore::TransformedArray<Shared<int>> tdest = dest; tensorstore::internal::Arena arena; TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto source_iterable, GetTransformedArrayNDIterable(tsource, &arena)); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto dest_iterable, GetTransformedArrayNDIterable(tdest, &arena)); TENSORSTORE_ASSERT_OK(tensorstore::internal::NDIterableCopier( *source_iterable, *dest_iterable, dest.shape(), {}, &arena) .Copy()); EXPECT_EQ(expected, dest); }
#include "quiche/balsa/balsa_headers_sequence.h" #include <memory> #include <utility> #include "quiche/balsa/balsa_headers.h" namespace quiche { void BalsaHeadersSequence::Append(std::unique_ptr<BalsaHeaders> headers) { sequence_.push_back(std::move(headers)); } bool BalsaHeadersSequence::HasNext() const { return next_ < sequence_.size(); } BalsaHeaders* BalsaHeadersSequence::PeekNext() { if (!HasNext()) { return nullptr; } return sequence_[next_].get(); } BalsaHeaders* BalsaHeadersSequence::Next() { if (!HasNext()) { return nullptr; } return sequence_[next_++].get(); } void BalsaHeadersSequence::Clear() { sequence_.clear(); next_ = 0; } }
#include "quiche/balsa/balsa_headers_sequence.h" #include <memory> #include <utility> #include "quiche/balsa/balsa_headers.h" #include "quiche/common/platform/api/quiche_test.h" namespace quiche { namespace test { namespace { TEST(BalsaHeadersSequenceTest, Initial) { BalsaHeadersSequence sequence; EXPECT_FALSE(sequence.HasNext()); EXPECT_EQ(sequence.Next(), nullptr); EXPECT_TRUE(sequence.IsEmpty()); } TEST(BalsaHeadersSequenceTest, Basic) { BalsaHeadersSequence sequence; auto headers_one = std::make_unique<BalsaHeaders>(); headers_one->AppendHeader("one", "fish"); sequence.Append(std::move(headers_one)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); auto headers_two = std::make_unique<BalsaHeaders>(); headers_two->AppendHeader("two", "fish"); sequence.Append(std::move(headers_two)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); const BalsaHeaders* headers = sequence.Next(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("one")); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); headers = sequence.Next(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("two")); EXPECT_FALSE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); EXPECT_EQ(sequence.Next(), nullptr); } TEST(BalsaHeadersSequenceTest, Clear) { BalsaHeadersSequence sequence; auto headers_one = std::make_unique<BalsaHeaders>(); headers_one->AppendHeader("one", "fish"); sequence.Append(std::move(headers_one)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); auto headers_two = std::make_unique<BalsaHeaders>(); headers_two->AppendHeader("two", "fish"); sequence.Append(std::move(headers_two)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); sequence.Clear(); EXPECT_FALSE(sequence.HasNext()); EXPECT_EQ(sequence.Next(), nullptr); EXPECT_TRUE(sequence.IsEmpty()); } TEST(BalsaHeadersSequenceTest, PeekNext) { BalsaHeadersSequence sequence; EXPECT_EQ(sequence.PeekNext(), nullptr); auto headers_one = std::make_unique<BalsaHeaders>(); headers_one->AppendHeader("one", "fish"); sequence.Append(std::move(headers_one)); EXPECT_TRUE(sequence.HasNext()); const BalsaHeaders* headers = sequence.PeekNext(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("one")); EXPECT_TRUE(sequence.HasNext()); EXPECT_EQ(sequence.PeekNext(), headers); auto headers_two = std::make_unique<BalsaHeaders>(); headers_two->AppendHeader("two", "fish"); sequence.Append(std::move(headers_two)); EXPECT_TRUE(sequence.HasNext()); EXPECT_EQ(sequence.PeekNext(), headers); headers = sequence.Next(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("one")); EXPECT_TRUE(sequence.HasNext()); headers = sequence.PeekNext(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("two")); EXPECT_TRUE(sequence.HasNext()); headers = sequence.Next(); ASSERT_NE(headers, nullptr); EXPECT_TRUE(headers->HasHeader("two")); EXPECT_FALSE(sequence.HasNext()); EXPECT_EQ(sequence.PeekNext(), nullptr); } TEST(BalsaHeadersSequenceTest, CanRetainValidReference) { BalsaHeadersSequence sequence; auto headers = std::make_unique<BalsaHeaders>(); headers->AppendHeader("one", "fish"); BalsaHeaders* headers_ptr = headers.get(); sequence.Append(std::move(headers)); ASSERT_TRUE(sequence.HasNext()); EXPECT_EQ(sequence.Next(), headers_ptr); } } } }
void BalsaHeadersSequence::Clear() { sequence_.clear(); next_ = 0; }
TEST(BalsaHeadersSequenceTest, Clear) { BalsaHeadersSequence sequence; auto headers_one = std::make_unique<BalsaHeaders>(); headers_one->AppendHeader("one", "fish"); sequence.Append(std::move(headers_one)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); auto headers_two = std::make_unique<BalsaHeaders>(); headers_two->AppendHeader("two", "fish"); sequence.Append(std::move(headers_two)); EXPECT_TRUE(sequence.HasNext()); EXPECT_FALSE(sequence.IsEmpty()); sequence.Clear(); EXPECT_FALSE(sequence.HasNext()); EXPECT_EQ(sequence.Next(), nullptr); EXPECT_TRUE(sequence.IsEmpty()); }
#include "tensorflow/core/lib/strings/ordered_code.h" #include <assert.h> #include <stddef.h> #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stringpiece.h" #include "tsl/lib/core/bits.h" namespace tensorflow { namespace strings { static const char kEscape1 = '\000'; static const char kNullCharacter = '\xff'; static const char kSeparator = '\001'; static const char kEscape2 = '\xff'; static const char kFFCharacter = '\000'; static const char kEscape1_Separator[2] = {kEscape1, kSeparator}; inline static void AppendBytes(string* dest, const char* src, size_t len) { dest->append(src, len); } inline bool IsSpecialByte(char c) { return (static_cast<unsigned char>(c + 1)) < 2; } inline const char* SkipToNextSpecialByte(const char* start, const char* limit) { DCHECK_EQ(kEscape1, 0); DCHECK_EQ(kEscape2 & 0xffu, 255u); const char* p = start; while (p < limit && !IsSpecialByte(*p)) { p++; } return p; } const char* OrderedCode::TEST_SkipToNextSpecialByte(const char* start, const char* limit) { return SkipToNextSpecialByte(start, limit); } inline static void EncodeStringFragment(string* dest, StringPiece s) { const char* p = s.data(); const char* limit = p + s.size(); const char* copy_start = p; while (true) { p = SkipToNextSpecialByte(p, limit); if (p >= limit) break; char c = *(p++); DCHECK(IsSpecialByte(c)); if (c == kEscape1) { AppendBytes(dest, copy_start, p - copy_start - 1); dest->push_back(kEscape1); dest->push_back(kNullCharacter); copy_start = p; } else { assert(c == kEscape2); AppendBytes(dest, copy_start, p - copy_start - 1); dest->push_back(kEscape2); dest->push_back(kFFCharacter); copy_start = p; } } if (p > copy_start) { AppendBytes(dest, copy_start, p - copy_start); } } void OrderedCode::WriteString(string* dest, StringPiece s) { EncodeStringFragment(dest, s); AppendBytes(dest, kEscape1_Separator, 2); } void OrderedCode::WriteNumIncreasing(string* dest, uint64 val) { unsigned char buf[9]; int len = 0; while (val > 0) { len++; buf[9 - len] = (val & 0xff); val >>= 8; } buf[9 - len - 1] = len; len++; AppendBytes(dest, reinterpret_cast<const char*>(buf + 9 - len), len); } inline static bool ReadStringInternal(StringPiece* src, string* result) { const char* start = src->data(); const char* string_limit = src->data() + src->size(); const char* limit = string_limit - 1; const char* copy_start = start; while (true) { start = SkipToNextSpecialByte(start, limit); if (start >= limit) break; const char c = *(start++); DCHECK(IsSpecialByte(c)); if (c == kEscape1) { if (result) { AppendBytes(result, copy_start, start - copy_start - 1); } const char next = *(start++); if (next == kSeparator) { src->remove_prefix(start - src->data()); return true; } else if (next == kNullCharacter) { if (result) { *result += '\0'; } } else { return false; } copy_start = start; } else { assert(c == kEscape2); if (result) { AppendBytes(result, copy_start, start - copy_start - 1); } const char next = *(start++); if (next == kFFCharacter) { if (result) { *result += '\xff'; } } else { return false; } copy_start = start; } } return false; } bool OrderedCode::ReadString(StringPiece* src, string* result) { return ReadStringInternal(src, result); } bool OrderedCode::ReadNumIncreasing(StringPiece* src, uint64* result) { if (src->empty()) { return false; } const size_t len = static_cast<unsigned char>((*src)[0]); DCHECK(0 == len || src->size() == 1 || (*src)[1] != '\0') << "invalid encoding"; if (len + 1 > src->size() || len > 8) { return false; } if (result) { uint64 tmp = 0; for (size_t i = 0; 
i < len; i++) { tmp <<= 8; tmp |= static_cast<unsigned char>((*src)[1 + i]); } *result = tmp; } src->remove_prefix(len + 1); return true; } void OrderedCode::TEST_Corrupt(string* str, int k) { int seen_seps = 0; for (size_t i = 0; i + 1 < str->size(); i++) { if ((*str)[i] == kEscape1 && (*str)[i + 1] == kSeparator) { seen_seps++; if (seen_seps == k) { (*str)[i + 1] = kSeparator + 1; return; } } } } static const int kMaxSigned64Length = 10; static const char kLengthToHeaderBits[1 + kMaxSigned64Length][2] = { {0, 0}, {'\x80', 0}, {'\xc0', 0}, {'\xe0', 0}, {'\xf0', 0}, {'\xf8', 0}, {'\xfc', 0}, {'\xfe', 0}, {'\xff', 0}, {'\xff', '\x80'}, {'\xff', '\xc0'}}; static const uint64 kLengthToMask[1 + kMaxSigned64Length] = { 0ULL, 0x80ULL, 0xc000ULL, 0xe00000ULL, 0xf0000000ULL, 0xf800000000ULL, 0xfc0000000000ULL, 0xfe000000000000ULL, 0xff00000000000000ULL, 0x8000000000000000ULL, 0ULL}; static const int8 kBitsToLength[1 + 63] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10}; static inline int SignedEncodingLength(int64_t n) { return kBitsToLength[tsl::Log2Floor64(n < 0 ? ~n : n) + 1]; } static void StoreBigEndian64(char* dst, uint64 v) { for (int i = 0; i < 8; i++) { dst[i] = (v >> (56 - 8 * i)) & 0xff; } } static uint64 LoadBigEndian64(const char* src) { uint64 result = 0; for (int i = 0; i < 8; i++) { unsigned char c = static_cast<unsigned char>(src[i]); result |= static_cast<uint64>(c) << (56 - 8 * i); } return result; } void OrderedCode::WriteSignedNumIncreasing(string* dest, int64_t val) { const uint64 x = val < 0 ? ~val : val; if (x < 64) { *dest += kLengthToHeaderBits[1][0] ^ val; return; } const char sign_byte = val < 0 ? '\xff' : '\0'; char buf[10] = { sign_byte, sign_byte, }; StoreBigEndian64(buf + 2, val); static_assert(sizeof(buf) == kMaxSigned64Length, "max length size mismatch"); const int len = SignedEncodingLength(x); DCHECK_GE(len, 2); char* const begin = buf + sizeof(buf) - len; begin[0] ^= kLengthToHeaderBits[len][0]; begin[1] ^= kLengthToHeaderBits[len][1]; dest->append(begin, len); } bool OrderedCode::ReadSignedNumIncreasing(StringPiece* src, int64_t* result) { if (src->empty()) return false; const uint64 xor_mask = (!((*src)[0] & 0x80)) ? ~0ULL : 0ULL; const unsigned char first_byte = (*src)[0] ^ (xor_mask & 0xff); int len; uint64 x; if (first_byte != 0xff) { len = 7 - tsl::Log2Floor64(first_byte ^ 0xff); if (src->size() < static_cast<size_t>(len)) return false; x = xor_mask; for (int i = 0; i < len; ++i) x = (x << 8) | static_cast<unsigned char>((*src)[i]); } else { len = 8; if (src->size() < static_cast<size_t>(len)) return false; const unsigned char second_byte = (*src)[1] ^ (xor_mask & 0xff); if (second_byte >= 0x80) { if (second_byte < 0xc0) { len = 9; } else { const unsigned char third_byte = (*src)[2] ^ (xor_mask & 0xff); if (second_byte == 0xc0 && third_byte < 0x80) { len = 10; } else { return false; } } if (src->size() < static_cast<size_t>(len)) return false; } x = LoadBigEndian64(src->data() + len - 8); } x ^= kLengthToMask[len]; DCHECK_EQ(len, SignedEncodingLength(x)) << "invalid encoding"; if (result) *result = x; src->remove_prefix(len); return true; } } }
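The property all of these encodings maintain, and which the ordering tests below verify, is that byte-wise lexicographic order of the encoded strings equals numeric order, even across encodings of different lengths. A quick illustrative check using WriteNumIncreasing as defined above:

#include <string>

bool OrderPreserved() {
  using tensorflow::strings::OrderedCode;
  std::string a, b, c;
  OrderedCode::WriteNumIncreasing(&a, 3);    // "\x01\x03" (length byte + payload)
  OrderedCode::WriteNumIncreasing(&b, 255);  // "\x01\xff"
  OrderedCode::WriteNumIncreasing(&c, 256);  // "\x02\x01\x00"
  // The leading length byte keeps shorter encodings ordered before longer ones.
  return a < b && b < c;
}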
#include "tensorflow/core/lib/strings/ordered_code.h" #include <float.h> #include <stddef.h> #include <limits> #include <vector> #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace strings { namespace { string RandomString(random::SimplePhilox* rnd, size_t len) { string x; for (size_t i = 0; i < len; i++) { x += rnd->Uniform(256); } return x; } template <typename T> void OCWriteIncreasing(string* dest, const T& val); template <typename T> bool OCReadIncreasing(StringPiece* src, T* result); template <> void OCWriteIncreasing<string>(string* dest, const string& val) { OrderedCode::WriteString(dest, val); } template <> bool OCReadIncreasing<string>(StringPiece* src, string* result) { return OrderedCode::ReadString(src, result); } template <> void OCWriteIncreasing<uint64>(string* dest, const uint64& val) { OrderedCode::WriteNumIncreasing(dest, val); } template <> bool OCReadIncreasing<uint64>(StringPiece* src, uint64* result) { return OrderedCode::ReadNumIncreasing(src, result); } template <> void OCWriteIncreasing<int64_t>(string* dest, const int64_t& val) { OrderedCode::WriteSignedNumIncreasing(dest, val); } template <> bool OCReadIncreasing<int64_t>(StringPiece* src, int64_t* result) { return OrderedCode::ReadSignedNumIncreasing(src, result); } template <typename T> string OCWrite(T val) { string result; OCWriteIncreasing<T>(&result, val); return result; } template <typename T> void OCWriteToString(string* result, T val) { OCWriteIncreasing<T>(result, val); } template <typename T> bool OCRead(StringPiece* s, T* val) { return OCReadIncreasing<T>(s, val); } template <typename T> T TestRead(const string& a) { for (int i = 0; i < a.size() - 1; ++i) { StringPiece s(a.data(), i); CHECK(!OCRead<T>(&s, nullptr)); CHECK_EQ(s, a.substr(0, i)); } StringPiece s(a); T v; CHECK(OCRead<T>(&s, &v)); CHECK(s.empty()); return v; } template <typename T> void TestWriteRead(T expected) { EXPECT_EQ(expected, TestRead<T>(OCWrite<T>(expected))); } template <typename T, typename U> void TestWriteAppends(T first, U second) { string encoded; OCWriteToString<T>(&encoded, first); string encoded_first_only = encoded; OCWriteToString<U>(&encoded, second); EXPECT_NE(encoded, encoded_first_only); EXPECT_TRUE(absl::StartsWith(encoded, encoded_first_only)); } template <typename T> void TestNumbers(T multiplier) { for (T x = std::numeric_limits<T>().max(); x != 0; x /= 2) { TestWriteRead(multiplier * (x - 1)); TestWriteRead(multiplier * x); if (x != std::numeric_limits<T>::max()) { TestWriteRead(multiplier * (x + 1)); } else if (multiplier < 0 && multiplier == -1) { TestWriteRead(-x - 1); } } random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int bits = 1; bits <= std::numeric_limits<T>().digits; ++bits) { const uint64 mask = (~0ULL) >> (64 - bits); for (int i = 0; i < 1000; i++) { T x = rnd.Rand64() & mask; TestWriteRead(multiplier * x); T y = rnd.Rand64() & mask; TestWriteAppends(multiplier * x, multiplier * y); } } } bool CompareStrings(const string& a, const string& b) { return (a < b); } template <typename T> void TestNumberOrdering() { string laststr = OCWrite<T>(std::numeric_limits<T>().min()); for (T num = std::numeric_limits<T>().min() / 2; num != 0; num /= 2) { string 
strminus1 = OCWrite<T>(num - 1); string str = OCWrite<T>(num); string strplus1 = OCWrite<T>(num + 1); CHECK(CompareStrings(strminus1, str)); CHECK(CompareStrings(str, strplus1)); CHECK(CompareStrings(laststr, str)); laststr = str; } laststr = OCWrite<T>(0); T num = 1; while (num < std::numeric_limits<T>().max() / 2) { num *= 2; string strminus1 = OCWrite<T>(num - 1); string str = OCWrite<T>(num); string strplus1 = OCWrite<T>(num + 1); CHECK(CompareStrings(strminus1, str)); CHECK(CompareStrings(str, strplus1)); CHECK(CompareStrings(laststr, str)); laststr = str; } } size_t FindSpecial(const string& x) { const char* p = x.data(); const char* limit = p + x.size(); const char* result = OrderedCode::TEST_SkipToNextSpecialByte(p, limit); return result - p; } template <size_t N> string ByteSequence(const char (&arr)[N]) { return string(arr, N - 1); } TEST(OrderedCode, SkipToNextSpecialByte) { for (size_t len = 0; len < 256; len++) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); string x; while (x.size() < len) { char c = 1 + rnd.Uniform(254); ASSERT_NE(c, 0); ASSERT_NE(c, 255); x += c; } EXPECT_EQ(FindSpecial(x), x.size()); for (size_t special_pos = 0; special_pos < len; special_pos++) { for (size_t special_test = 0; special_test < 2; special_test++) { const char special_byte = (special_test == 0) ? 0 : 255; string y = x; y[special_pos] = special_byte; EXPECT_EQ(FindSpecial(y), special_pos); if (special_pos < 16) { for (size_t rest = special_pos + 1; rest < len; rest++) { if (rnd.OneIn(3)) { y[rest] = rnd.OneIn(2) ? 0 : 255; EXPECT_EQ(FindSpecial(y), special_pos); } } } } } } } TEST(OrderedCode, ExhaustiveFindSpecial) { char buf[16]; char* limit = buf + sizeof(buf); int count = 0; for (int start_offset = 0; start_offset <= 5; start_offset += 5) { for (size_t i = 0; i < sizeof(buf); i++) { buf[i] = 'a'; } for (int b0 = 0; b0 < 256; b0++) { for (int b1 = 0; b1 < 256; b1++) { for (int b2 = 0; b2 < 256; b2++) { buf[start_offset + 0] = b0; buf[start_offset + 1] = b1; buf[start_offset + 2] = b2; char* expected; if (b0 == 0 || b0 == 255) { expected = &buf[start_offset]; } else if (b1 == 0 || b1 == 255) { expected = &buf[start_offset + 1]; } else if (b2 == 0 || b2 == 255) { expected = &buf[start_offset + 2]; } else { expected = limit; } count++; EXPECT_EQ(expected, OrderedCode::TEST_SkipToNextSpecialByte(buf, limit)); } } } } EXPECT_EQ(count, 256 * 256 * 256 * 2); } TEST(Uint64, EncodeDecode) { TestNumbers<uint64>(1); } TEST(Uint64, Ordering) { TestNumberOrdering<uint64>(); } TEST(Int64, EncodeDecode) { TestNumbers<int64_t>(1); TestNumbers<int64_t>(-1); } TEST(Int64, Ordering) { TestNumberOrdering<int64_t>(); } inline string StrNot(const string& s) { string result; for (string::const_iterator it = s.begin(); it != s.end(); ++it) result.push_back(~*it); return result; } template <typename T> void TestInvalidEncoding(const string& s) { StringPiece p(s); EXPECT_FALSE(OCRead<T>(&p, nullptr)); EXPECT_EQ(s, p); } TEST(OrderedCodeInvalidEncodingsTest, Overflow) { const string k2xx64U = "\x09\x01" + string(8, 0); TestInvalidEncoding<uint64>(k2xx64U); const string k2xx63 = "\xff\xc0\x80" + string(7, 0); TestInvalidEncoding<int64_t>(k2xx63); TestInvalidEncoding<int64_t>(StrNot(k2xx63)); } TEST(OrderedCodeInvalidEncodingsDeathTest, NonCanonical) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int n = 2; n <= 9; ++n) { string non_minimal = string(1, n - 1) + string(1, 0) + RandomString(&rnd, n - 2); EXPECT_EQ(n, non_minimal.length()); 
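    // A length byte of n-1 followed by a zero high payload byte encodes a
    // value that would fit in n-2 bytes: a well-formed but non-minimal
    // encoding. Debug builds DCHECK-fail on it below; opt builds decode it.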
EXPECT_NE(OCWrite<uint64>(0), non_minimal); #ifndef NDEBUG StringPiece s(non_minimal); EXPECT_DEATH(OrderedCode::ReadNumIncreasing(&s, nullptr), "invalid encoding"); #else TestRead<uint64>(non_minimal); #endif } for (int n = 2; n <= 10; ++n) { string header = string(n / 8, 0xff) + string(1, 0xff << (8 - (n % 8))); string non_minimal = header + string(1, rnd.Uniform(256) & ~*header.rbegin()) + RandomString(&rnd, n - header.length() - 1); EXPECT_EQ(n, non_minimal.length()); EXPECT_NE(OCWrite<int64_t>(0), non_minimal); #ifndef NDEBUG StringPiece s(non_minimal); EXPECT_DEATH(OrderedCode::ReadSignedNumIncreasing(&s, nullptr), "invalid encoding") << n; #else TestRead<int64_t>(non_minimal); #endif } } uint64 NextBits(random::SimplePhilox* rnd, int bits) { return (bits != 0) ? (rnd->Rand64() % (1LL << (bits - 1))) + (1LL << (bits - 1)) : 0; } template <typename T> void BM_WriteNum(::testing::benchmark::State& state, T multiplier) { constexpr int kValues = 64; T values[kValues]; random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int i = 0; i < kValues; i++) { values[i] = NextBits(&rnd, state.max_iterations % 64) * multiplier; } string result; int index = 0; for (auto i : state) { result.clear(); OCWriteToString<T>(&result, values[index % kValues]); index++; } } template <typename T> void BM_ReadNum(::testing::benchmark::State& state, T multiplier) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); constexpr int kValues = 64; string values[kValues]; for (int i = 0; i < kValues; i++) { T val = NextBits(&rnd, i % 64) * multiplier; values[i] = OCWrite<T>(val); } uint32 index = 0; for (auto i : state) { T val; StringPiece s = values[index++ % kValues]; OCRead<T>(&s, &val); } } #define BENCHMARK_NUM(name, T, multiplier) \ void BM_Write##name(::testing::benchmark::State& state) { \ BM_WriteNum<T>(state, multiplier); \ } \ BENCHMARK(BM_Write##name); \ void BM_Read##name(::testing::benchmark::State& state) { \ BM_ReadNum<T>(state, multiplier); \ } \ BENCHMARK(BM_Read##name) BENCHMARK_NUM(NumIncreasing, uint64, 1); BENCHMARK_NUM(SignedNum, int64_t, 1); BENCHMARK_NUM(SignedNumNegative, int64_t, -1); #undef BENCHMARK_NUM TEST(String, EncodeDecode) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int len = 0; len < 256; len++) { const string a = RandomString(&rnd, len); TestWriteRead(a); for (int len2 = 0; len2 < 64; len2++) { const string b = RandomString(&rnd, len2); TestWriteAppends(a, b); string out; OCWriteToString<string>(&out, a); OCWriteToString<string>(&out, b); string a2, b2, dummy; StringPiece s = out; StringPiece s2 = out; CHECK(OCRead<string>(&s, &a2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(OCRead<string>(&s, &b2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(!OCRead<string>(&s, &dummy)); CHECK(!OCRead<string>(&s2, nullptr)); CHECK_EQ(a, a2); CHECK_EQ(b, b2); CHECK(s.empty()); CHECK(s2.empty()); } } } #define STATIC_STR(str) StringPiece((str), sizeof(str) - 1) string EncodeStringIncreasing(StringPiece value) { string encoded; OrderedCode::WriteString(&encoded, value); return encoded; } TEST(String, Increasing) { ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("")), EncodeStringIncreasing(STATIC_STR(""))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("")), EncodeStringIncreasing(STATIC_STR("\x00"))); ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("\x00")), EncodeStringIncreasing(STATIC_STR("\x00"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x00")), 
EncodeStringIncreasing(STATIC_STR("\x01"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x01")), EncodeStringIncreasing(STATIC_STR("a"))); ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("a")), EncodeStringIncreasing(STATIC_STR("a"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("a")), EncodeStringIncreasing(STATIC_STR("aa"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("aa")), EncodeStringIncreasing(STATIC_STR("\xff"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff")), EncodeStringIncreasing(STATIC_STR("\xff\x00"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff\x00")), EncodeStringIncreasing(STATIC_STR("\xff\x01"))); } TEST(EncodingIsExpected, String) { std::vector<std::pair<string, string>> data = { {"", string("\x00\x01", 2)}, {"foo", string("foo\x00\x01", 5)}, {"hello", string("hello\x00\x01", 7)}, {string("\x00\x01\xff", 3), string("\x00\xff\x01\xff\x00\x00\x01", 7)}, }; for (const auto& t : data) { string result; OrderedCode::WriteString(&result, t.first); EXPECT_EQ(t.second, result); StringPiece in = result; string decoded; EXPECT_TRUE(OrderedCode::ReadString(&in, &decoded)); EXPECT_EQ(t.first, decoded); EXPECT_EQ("", in); } } TEST(EncodingIsExpected, Unsigned) { std::vector<std::pair<uint64, string>> data = { {0x0ull, ByteSequence("\000")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x3ull, ByteSequence("\001\003")}, {0x3ull, ByteSequence("\001\003")}, {0x4ull, ByteSequence("\001\004")}, {0x5ull, ByteSequence("\001\005")}, {0x7ull, ByteSequence("\001\007")}, {0x8ull, ByteSequence("\001\010")}, {0x9ull, ByteSequence("\001\t")}, {0xfull, ByteSequence("\001\017")}, {0x10ull, ByteSequence("\001\020")}, {0x11ull, ByteSequence("\001\021")}, {0x1full, ByteSequence("\001\037")}, {0x20ull, ByteSequence("\001 ")}, {0x21ull, ByteSequence("\001!")}, {0x3full, ByteSequence("\001?")}, {0x40ull, ByteSequence("\001@")}, {0x41ull, ByteSequence("\001A")}, {0x7full, ByteSequence("\001\177")}, {0x80ull, ByteSequence("\001\200")}, {0x81ull, ByteSequence("\001\201")}, {0xffull, ByteSequence("\001\377")}, {0x100ull, ByteSequence("\002\001\000")}, {0x101ull, ByteSequence("\002\001\001")}, {0x1ffull, ByteSequence("\002\001\377")}, {0x200ull, ByteSequence("\002\002\000")}, {0x201ull, ByteSequence("\002\002\001")}, {0x3ffull, ByteSequence("\002\003\377")}, {0x400ull, ByteSequence("\002\004\000")}, {0x401ull, ByteSequence("\002\004\001")}, {0x7ffull, ByteSequence("\002\007\377")}, {0x800ull, ByteSequence("\002\010\000")}, {0x801ull, ByteSequence("\002\010\001")}, {0xfffull, ByteSequence("\002\017\377")}, {0x1000ull, ByteSequence("\002\020\000")}, {0x1001ull, ByteSequence("\002\020\001")}, {0x1fffull, ByteSequence("\002\037\377")}, {0x2000ull, ByteSequence("\002 \000")}, {0x2001ull, ByteSequence("\002 \001")}, {0x3fffull, ByteSequence("\002?\377")}, {0x4000ull, ByteSequence("\002@\000")}, {0x4001ull, ByteSequence("\002@\001")}, {0x7fffull, ByteSequence("\002\177\377")}, {0x8000ull, ByteSequence("\002\200\000")}, {0x8001ull, ByteSequence("\002\200\001")}, {0xffffull, ByteSequence("\002\377\377")}, {0x10000ull, ByteSequence("\003\001\000\000")}, {0x10001ull, ByteSequence("\003\001\000\001")}, {0x1ffffull, ByteSequence("\003\001\377\377")}, {0x20000ull, ByteSequence("\003\002\000\000")}, {0x20001ull, ByteSequence("\003\002\000\001")}, {0x3ffffull, ByteSequence("\003\003\377\377")}, {0x40000ull, ByteSequence("\003\004\000\000")}, {0x40001ull, ByteSequence("\003\004\000\001")}, {0x7ffffull, 
ByteSequence("\003\007\377\377")}, {0x80000ull, ByteSequence("\003\010\000\000")}, {0x80001ull, ByteSequence("\003\010\000\001")}, {0xfffffull, ByteSequence("\003\017\377\377")}, {0x100000ull, ByteSequence("\003\020\000\000")}, {0x100001ull, ByteSequence("\003\020\000\001")}, {0x1fffffull, ByteSequence("\003\037\377\377")}, {0x200000ull, ByteSequence("\003 \000\000")}, {0x200001ull, ByteSequence("\003 \000\001")}, {0x3fffffull, ByteSequence("\003?\377\377")}, {0x400000ull, ByteSequence("\003@\000\000")}, {0x400001ull, ByteSequence("\003@\000\001")}, {0x7fffffull, ByteSequence("\003\177\377\377")}, {0x800000ull, ByteSequence("\003\200\000\000")}, {0x800001ull, ByteSequence("\003\200\000\001")}, {0xffffffull, ByteSequence("\003\377\377\377")}, {0x1000000ull, ByteSequence("\004\001\000\000\000")}, {0x1000001ull, ByteSequence("\004\001\000\000\001")}, {0x1ffffffull, ByteSequence("\004\001\377\377\377")}, {0x2000000ull, ByteSequence("\004\002\000\000\000")}, {0x2000001ull, ByteSequence("\004\002\000\000\001")}, {0x3ffffffull, ByteSequence("\004\003\377\377\377")}, {0x4000000ull, ByteSequence("\004\004\000\000\000")}, {0x4000001ull, ByteSequence("\004\004\000\000\001")}, {0x7ffffffull, ByteSequence("\004\007\377\377\377")}, {0x8000000ull, ByteSequence("\004\010\000\000\000")}, {0x8000001ull, ByteSequence("\004\010\000\000\001")}, {0xfffffffull, ByteSequence("\004\017\377\377\377")}, {0x10000000ull, ByteSequence("\004\020\000\000\000")}, {0x10000001ull, ByteSequence("\004\020\000\000\001")}, {0x1fffffffull, ByteSequence("\004\037\377\377\377")}, {0x20000000ull, ByteSequence("\004 \000\000\000")}, {0x20000001ull, ByteSequence("\004 \000\000\001")}, {0x3fffffffull, ByteSequence("\004?\377\377\377")}, {0x40000000ull, ByteSequence("\004@\000\000\000")}, {0x40000001ull, ByteSequence("\004@\000\000\001")}, {0x7fffffffull, ByteSequence("\004\177\377\377\377")}, {0x80000000ull, ByteSequence("\004\200\000\000\000")}, {0x80000001ull, ByteSequence("\004\200\000\000\001")}, {0xffffffffull, ByteSequence("\004\377\377\377\377")}, {0x100000000ull, ByteSequence("\005\001\000\000\000\000")}, {0x100000001ull, ByteSequence("\005\001\000\000\000\001")}, {0x1ffffffffull, ByteSequence("\005\001\377\377\377\377")}, {0x200000000ull, ByteSequence("\005\002\000\000\000\000")}, {0x200000001ull, ByteSequence("\005\002\000\000\000\001")}, {0x3ffffffffull, ByteSequence("\005\003\377\377\377\377")}, {0x400000000ull, ByteSequence("\005\004\000\000\000\000")}, {0x400000001ull, ByteSequence("\005\004\000\000\000\001")}, {0x7ffffffffull, ByteSequence("\005\007\377\377\377\377")}, {0x800000000ull, ByteSequence("\005\010\000\000\000\000")}, {0x800000001ull, ByteSequence("\005\010\000\000\000\001")}, {0xfffffffffull, ByteSequence("\005\017\377\377\377\377")}, {0x1000000000ull, ByteSequence("\005\020\000\000\000\000")}, {0x1000000001ull, ByteSequence("\005\020\000\000\000\001")}, {0x1fffffffffull, ByteSequence("\005\037\377\377\377\377")}, {0x2000000000ull, ByteSequence("\005 \000\000\000\000")}, {0x2000000001ull, ByteSequence("\005 \000\000\000\001")}, {0x3fffffffffull, ByteSequence("\005?\377\377\377\377")}, {0x4000000000ull, ByteSequence("\005@\000\000\000\000")}, {0x4000000001ull, ByteSequence("\005@\000\000\000\001")}, {0x7fffffffffull, ByteSequence("\005\177\377\377\377\377")}, {0x8000000000ull, ByteSequence("\005\200\000\000\000\000")}, {0x8000000001ull, ByteSequence("\005\200\000\000\000\001")}, {0xffffffffffull, ByteSequence("\005\377\377\377\377\377")}, {0x10000000000ull, ByteSequence("\006\001\000\000\000\000\000")}, 
{0x10000000001ull, ByteSequence("\006\001\000\000\000\000\001")}, {0x1ffffffffffull, ByteSequence("\006\001\377\377\377\377\377")}, {0x20000000000ull, ByteSequence("\006\002\000\000\000\000\000")}, {0x20000000001ull, ByteSequence("\006\002\000\000\000\000\001")}, {0x3ffffffffffull, ByteSequence("\006\003\377\377\377\377\377")}, {0x40000000000ull, ByteSequence("\006\004\000\000\000\000\000")}, {0x40000000001ull, ByteSequence("\006\004\000\000\000\000\001")}, {0x7ffffffffffull, ByteSequence("\006\007\377\377\377\377\377")}, {0x80000000000ull, ByteSequence("\006\010\000\000\000\000\000")}, {0x80000000001ull, ByteSequence("\006\010\000\000\000\000\001")}, {0xfffffffffffull, ByteSequence("\006\017\377\377\377\377\377")}, {0x100000000000ull, ByteSequence("\006\020\000\000\000\000\000")}, {0x100000000001ull, ByteSequence("\006\020\000\000\000\000\001")}, {0x1fffffffffffull, ByteSequence("\006\037\377\377\377\377\377")}, {0x200000000000ull, ByteSequence("\006 \000\000\000\000\000")}, {0x200000000001ull, ByteSequence("\006 \000\000\000\000\001")}, {0x3fffffffffffull, ByteSequence("\006?\377\377\377\377\377")}, {0x400000000000ull, ByteSequence("\006@\000\000\000\000\000")}, {0x400000000001ull, ByteSequence("\006@\000\000\000\000\001")}, {0x7fffffffffffull, ByteSequence("\006\177\377\377\377\377\377")}, {0x800000000000ull, ByteSequence("\006\200\000\000\000\000\000")}, {0x800000000001ull, ByteSequence("\006\200\000\000\000\000\001")}, {0xffffffffffffull, ByteSequence("\006\377\377\377\377\377\377")}, {0x1000000000000ull, ByteSequence("\007\001\000\000\000\000\000\000")}, {0x1000000000001ull, ByteSequence("\007\001\000\000\000\000\000\001")}, {0x1ffffffffffffull, ByteSequence("\007\001\377\377\377\377\377\377")}, {0x2000000000000ull, ByteSequence("\007\002\000\000\000\000\000\000")}, {0x2000000000001ull, ByteSequence("\007\002\000\000\000\000\000\001")}, {0x3ffffffffffffull, ByteSequence("\007\003\377\377\377\377\377\377")}, {0x4000000000000ull, ByteSequence("\007\004\000\000\000\000\000\000")}, {0x4000000000001ull, ByteSequence("\007\004\000\000\000\000\000\001")}, {0x7ffffffffffffull, ByteSequence("\007\007\377\377\377\377\377\377")}, {0x8000000000000ull, ByteSequence("\007\010\000\000\000\000\000\000")}, {0x8000000000001ull, ByteSequence("\007\010\000\000\000\000\000\001")}, {0xfffffffffffffull, ByteSequence("\007\017\377\377\377\377\377\377")}, {0x10000000000000ull, ByteSequence("\007\020\000\000\000\000\000\000")}, {0x10000000000001ull, ByteSequence("\007\020\000\000\000\000\000\001")}, {0x1fffffffffffffull, ByteSequence("\007\037\377\377\377\377\377\377")}, {0x20000000000000ull, ByteSequence("\007 \000\000\000\000\000\000")}, {0x20000000000001ull, ByteSequence("\007 \000\000\000\000\000\001")}, {0x3fffffffffffffull, ByteSequence("\007?\377\377\377\377\377\377")}, {0x40000000000000ull, ByteSequence("\007@\000\000\000\000\000\000")}, {0x40000000000001ull, ByteSequence("\007@\000\000\000\000\000\001")}, {0x7fffffffffffffull, ByteSequence("\007\177\377\377\377\377\377\377")}, {0x80000000000000ull, ByteSequence("\007\200\000\000\000\000\000\000")}, {0x80000000000001ull, ByteSequence("\007\200\000\000\000\000\000\001")}, {0xffffffffffffffull, ByteSequence("\007\377\377\377\377\377\377\377")}, {0x100000000000000ull, ByteSequence("\010\001\000\000\000\000\000\000\000")}, {0x100000000000001ull, ByteSequence("\010\001\000\000\000\000\000\000\001")}, {0x1ffffffffffffffull, ByteSequence("\010\001\377\377\377\377\377\377\377")}, {0x200000000000000ull, ByteSequence("\010\002\000\000\000\000\000\000\000")}, 
{0x200000000000001ull, ByteSequence("\010\002\000\000\000\000\000\000\001")}, {0x3ffffffffffffffull, ByteSequence("\010\003\377\377\377\377\377\377\377")}, {0x400000000000000ull, ByteSequence("\010\004\000\000\000\000\000\000\000")}, {0x400000000000001ull, ByteSequence("\010\004\000\000\000\000\000\000\001")}, {0x7ffffffffffffffull, ByteSequence("\010\007\377\377\377\377\377\377\377")}, {0x800000000000000ull, ByteSequence("\010\010\000\000\000\000\000\000\000")}, {0x800000000000001ull, ByteSequence("\010\010\000\000\000\000\000\000\001")}, {0xfffffffffffffffull, ByteSequence("\010\017\377\377\377\377\377\377\377")}, {0x1000000000000000ull, ByteSequence("\010\020\000\000\000\000\000\000\000")}, {0x1000000000000001ull, ByteSequence("\010\020\000\000\000\000\000\000\001")}, {0x1fffffffffffffffull, ByteSequence("\010\037\377\377\377\377\377\377\377")}, {0x2000000000000000ull, ByteSequence("\010 \000\000\000\000\000\000\000")}, {0x2000000000000001ull, ByteSequence("\010 \000\000\000\000\000\000\001")}, {0x3fffffffffffffffull, ByteSequence("\010?\377\377\377\377\377\377\377")}, {0x4000000000000000ull, ByteSequence("\010@\000\000\000\000\000\000\000")}, {0x4000000000000001ull, ByteSequence("\010@\000\000\000\000\000\000\001")}, {0x7fffffffffffffffull, ByteSequence("\010\177\377\377\377\377\377\377\377")}, {0x8000000000000000ull, ByteSequence("\010\200\000\000\000\000\000\000\000")}, {0x8000000000000001ull, ByteSequence("\010\200\000\000\000\000\000\000\001")}, }; for (const auto& t : data) { uint64 num = t.first; string result; OrderedCode::WriteNumIncreasing(&result, num); EXPECT_EQ(t.second, result) << std::hex << num; StringPiece in = result; uint64 decoded; EXPECT_TRUE(OrderedCode::ReadNumIncreasing(&in, &decoded)); EXPECT_EQ(num, decoded); EXPECT_EQ("", in); } } TEST(EncodingIsExpected, Signed) { std::vector<std::pair<int64_t, string>> data = { {0ll, ByteSequence("\200")}, {1ll, ByteSequence("\201")}, {2ll, ByteSequence("\202")}, {1ll, ByteSequence("\201")}, {2ll, ByteSequence("\202")}, {3ll, ByteSequence("\203")}, {3ll, ByteSequence("\203")}, {4ll, ByteSequence("\204")}, {5ll, ByteSequence("\205")}, {7ll, ByteSequence("\207")}, {8ll, ByteSequence("\210")}, {9ll, ByteSequence("\211")}, {15ll, ByteSequence("\217")}, {16ll, ByteSequence("\220")}, {17ll, ByteSequence("\221")}, {31ll, ByteSequence("\237")}, {32ll, ByteSequence("\240")}, {33ll, ByteSequence("\241")}, {63ll, ByteSequence("\277")}, {64ll, ByteSequence("\300@")}, {65ll, ByteSequence("\300A")}, {127ll, ByteSequence("\300\177")}, {128ll, ByteSequence("\300\200")}, {129ll, ByteSequence("\300\201")}, {255ll, ByteSequence("\300\377")}, {256ll, ByteSequence("\301\000")}, {257ll, ByteSequence("\301\001")}, {511ll, ByteSequence("\301\377")}, {512ll, ByteSequence("\302\000")}, {513ll, ByteSequence("\302\001")}, {1023ll, ByteSequence("\303\377")}, {1024ll, ByteSequence("\304\000")}, {1025ll, ByteSequence("\304\001")}, {2047ll, ByteSequence("\307\377")}, {2048ll, ByteSequence("\310\000")}, {2049ll, ByteSequence("\310\001")}, {4095ll, ByteSequence("\317\377")}, {4096ll, ByteSequence("\320\000")}, {4097ll, ByteSequence("\320\001")}, {8191ll, ByteSequence("\337\377")}, {8192ll, ByteSequence("\340 \000")}, {8193ll, ByteSequence("\340 \001")}, {16383ll, ByteSequence("\340?\377")}, {16384ll, ByteSequence("\340@\000")}, {16385ll, ByteSequence("\340@\001")}, {32767ll, ByteSequence("\340\177\377")}, {32768ll, ByteSequence("\340\200\000")}, {32769ll, ByteSequence("\340\200\001")}, {65535ll, ByteSequence("\340\377\377")}, {65536ll, 
ByteSequence("\341\000\000")}, {65537ll, ByteSequence("\341\000\001")}, {131071ll, ByteSequence("\341\377\377")}, {131072ll, ByteSequence("\342\000\000")}, {131073ll, ByteSequence("\342\000\001")}, {262143ll, ByteSequence("\343\377\377")}, {262144ll, ByteSequence("\344\000\000")}, {262145ll, ByteSequence("\344\000\001")}, {524287ll, ByteSequence("\347\377\377")}, {524288ll, ByteSequence("\350\000\000")}, {524289ll, ByteSequence("\350\000\001")}, {1048575ll, ByteSequence("\357\377\377")}, {1048576ll, ByteSequence("\360\020\000\000")}, {1048577ll, ByteSequence("\360\020\000\001")}, {2097151ll, ByteSequence("\360\037\377\377")}, {2097152ll, ByteSequence("\360 \000\000")}, {2097153ll, ByteSequence("\360 \000\001")}, {4194303ll, ByteSequence("\360?\377\377")}, {4194304ll, ByteSequence("\360@\000\000")}, {4194305ll, ByteSequence("\360@\000\001")}, {8388607ll, ByteSequence("\360\177\377\377")}, {8388608ll, ByteSequence("\360\200\000\000")}, {8388609ll, ByteSequence("\360\200\000\001")}, {16777215ll, ByteSequence("\360\377\377\377")}, {16777216ll, ByteSequence("\361\000\000\000")}, {16777217ll, ByteSequence("\361\000\000\001")}, {33554431ll, ByteSequence
void OrderedCode::WriteNumIncreasing(string* dest, uint64 val) {
  // Encoding: one length byte, then the value in big-endian order with
  // leading zero bytes dropped. Shorter encodings therefore sort before
  // longer ones, so numeric order survives lexicographic comparison.
  unsigned char buf[9];  // up to 8 value bytes plus 1 length byte
  int len = 0;
  while (val > 0) {
    len++;
    buf[9 - len] = (val & 0xff);
    val >>= 8;
  }
  buf[9 - len - 1] = len;  // write the length prefix just before the value
  len++;
  AppendBytes(dest, reinterpret_cast<const char*>(buf + 9 - len), len);
}
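A worked trace of this encoding (an illustrative sketch, not part of the original file): encoding 0x1234 leaves two nonzero value bytes, so the length prefix is 2.

// Sketch: tracing WriteNumIncreasing(&dest, 0x1234).
//   pass 1: buf[8] = 0x34, val = 0x12, len = 1
//   pass 2: buf[7] = 0x12, val = 0,    len = 2
//   prefix: buf[6] = 0x02, len = 3
// Appended bytes: "\x02\x12\x34", consistent with the expected-encoding
// table in the test below (compare 0x1000 -> "\002\020\000").
string dest;
OrderedCode::WriteNumIncreasing(&dest, 0x1234);
// dest == string("\x02\x12\x34", 3)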
TEST(Uint64, EncodeDecode) { TestNumbers<uint64>(1); } TEST(EncodingIsExpected, Unsigned) { std::vector<std::pair<uint64, string>> data = { {0x0ull, ByteSequence("\000")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x3ull, ByteSequence("\001\003")}, {0x3ull, ByteSequence("\001\003")}, {0x4ull, ByteSequence("\001\004")}, {0x5ull, ByteSequence("\001\005")}, {0x7ull, ByteSequence("\001\007")}, {0x8ull, ByteSequence("\001\010")}, {0x9ull, ByteSequence("\001\t")}, {0xfull, ByteSequence("\001\017")}, {0x10ull, ByteSequence("\001\020")}, {0x11ull, ByteSequence("\001\021")}, {0x1full, ByteSequence("\001\037")}, {0x20ull, ByteSequence("\001 ")}, {0x21ull, ByteSequence("\001!")}, {0x3full, ByteSequence("\001?")}, {0x40ull, ByteSequence("\001@")}, {0x41ull, ByteSequence("\001A")}, {0x7full, ByteSequence("\001\177")}, {0x80ull, ByteSequence("\001\200")}, {0x81ull, ByteSequence("\001\201")}, {0xffull, ByteSequence("\001\377")}, {0x100ull, ByteSequence("\002\001\000")}, {0x101ull, ByteSequence("\002\001\001")}, {0x1ffull, ByteSequence("\002\001\377")}, {0x200ull, ByteSequence("\002\002\000")}, {0x201ull, ByteSequence("\002\002\001")}, {0x3ffull, ByteSequence("\002\003\377")}, {0x400ull, ByteSequence("\002\004\000")}, {0x401ull, ByteSequence("\002\004\001")}, {0x7ffull, ByteSequence("\002\007\377")}, {0x800ull, ByteSequence("\002\010\000")}, {0x801ull, ByteSequence("\002\010\001")}, {0xfffull, ByteSequence("\002\017\377")}, {0x1000ull, ByteSequence("\002\020\000")}, {0x1001ull, ByteSequence("\002\020\001")}, {0x1fffull, ByteSequence("\002\037\377")}, {0x2000ull, ByteSequence("\002 \000")}, {0x2001ull, ByteSequence("\002 \001")}, {0x3fffull, ByteSequence("\002?\377")}, {0x4000ull, ByteSequence("\002@\000")}, {0x4001ull, ByteSequence("\002@\001")}, {0x7fffull, ByteSequence("\002\177\377")}, {0x8000ull, ByteSequence("\002\200\000")}, {0x8001ull, ByteSequence("\002\200\001")}, {0xffffull, ByteSequence("\002\377\377")}, {0x10000ull, ByteSequence("\003\001\000\000")}, {0x10001ull, ByteSequence("\003\001\000\001")}, {0x1ffffull, ByteSequence("\003\001\377\377")}, {0x20000ull, ByteSequence("\003\002\000\000")}, {0x20001ull, ByteSequence("\003\002\000\001")}, {0x3ffffull, ByteSequence("\003\003\377\377")}, {0x40000ull, ByteSequence("\003\004\000\000")}, {0x40001ull, ByteSequence("\003\004\000\001")}, {0x7ffffull, ByteSequence("\003\007\377\377")}, {0x80000ull, ByteSequence("\003\010\000\000")}, {0x80001ull, ByteSequence("\003\010\000\001")}, {0xfffffull, ByteSequence("\003\017\377\377")}, {0x100000ull, ByteSequence("\003\020\000\000")}, {0x100001ull, ByteSequence("\003\020\000\001")}, {0x1fffffull, ByteSequence("\003\037\377\377")}, {0x200000ull, ByteSequence("\003 \000\000")}, {0x200001ull, ByteSequence("\003 \000\001")}, {0x3fffffull, ByteSequence("\003?\377\377")}, {0x400000ull, ByteSequence("\003@\000\000")}, {0x400001ull, ByteSequence("\003@\000\001")}, {0x7fffffull, ByteSequence("\003\177\377\377")}, {0x800000ull, ByteSequence("\003\200\000\000")}, {0x800001ull, ByteSequence("\003\200\000\001")}, {0xffffffull, ByteSequence("\003\377\377\377")}, {0x1000000ull, ByteSequence("\004\001\000\000\000")}, {0x1000001ull, ByteSequence("\004\001\000\000\001")}, {0x1ffffffull, ByteSequence("\004\001\377\377\377")}, {0x2000000ull, ByteSequence("\004\002\000\000\000")}, {0x2000001ull, ByteSequence("\004\002\000\000\001")}, {0x3ffffffull, ByteSequence("\004\003\377\377\377")}, {0x4000000ull, 
ByteSequence("\004\004\000\000\000")}, {0x4000001ull, ByteSequence("\004\004\000\000\001")}, {0x7ffffffull, ByteSequence("\004\007\377\377\377")}, {0x8000000ull, ByteSequence("\004\010\000\000\000")}, {0x8000001ull, ByteSequence("\004\010\000\000\001")}, {0xfffffffull, ByteSequence("\004\017\377\377\377")}, {0x10000000ull, ByteSequence("\004\020\000\000\000")}, {0x10000001ull, ByteSequence("\004\020\000\000\001")}, {0x1fffffffull, ByteSequence("\004\037\377\377\377")}, {0x20000000ull, ByteSequence("\004 \000\000\000")}, {0x20000001ull, ByteSequence("\004 \000\000\001")}, {0x3fffffffull, ByteSequence("\004?\377\377\377")}, {0x40000000ull, ByteSequence("\004@\000\000\000")}, {0x40000001ull, ByteSequence("\004@\000\000\001")}, {0x7fffffffull, ByteSequence("\004\177\377\377\377")}, {0x80000000ull, ByteSequence("\004\200\000\000\000")}, {0x80000001ull, ByteSequence("\004\200\000\000\001")}, {0xffffffffull, ByteSequence("\004\377\377\377\377")}, {0x100000000ull, ByteSequence("\005\001\000\000\000\000")}, {0x100000001ull, ByteSequence("\005\001\000\000\000\001")}, {0x1ffffffffull, ByteSequence("\005\001\377\377\377\377")}, {0x200000000ull, ByteSequence("\005\002\000\000\000\000")}, {0x200000001ull, ByteSequence("\005\002\000\000\000\001")}, {0x3ffffffffull, ByteSequence("\005\003\377\377\377\377")}, {0x400000000ull, ByteSequence("\005\004\000\000\000\000")}, {0x400000001ull, ByteSequence("\005\004\000\000\000\001")}, {0x7ffffffffull, ByteSequence("\005\007\377\377\377\377")}, {0x800000000ull, ByteSequence("\005\010\000\000\000\000")}, {0x800000001ull, ByteSequence("\005\010\000\000\000\001")}, {0xfffffffffull, ByteSequence("\005\017\377\377\377\377")}, {0x1000000000ull, ByteSequence("\005\020\000\000\000\000")}, {0x1000000001ull, ByteSequence("\005\020\000\000\000\001")}, {0x1fffffffffull, ByteSequence("\005\037\377\377\377\377")}, {0x2000000000ull, ByteSequence("\005 \000\000\000\000")}, {0x2000000001ull, ByteSequence("\005 \000\000\000\001")}, {0x3fffffffffull, ByteSequence("\005?\377\377\377\377")}, {0x4000000000ull, ByteSequence("\005@\000\000\000\000")}, {0x4000000001ull, ByteSequence("\005@\000\000\000\001")}, {0x7fffffffffull, ByteSequence("\005\177\377\377\377\377")}, {0x8000000000ull, ByteSequence("\005\200\000\000\000\000")}, {0x8000000001ull, ByteSequence("\005\200\000\000\000\001")}, {0xffffffffffull, ByteSequence("\005\377\377\377\377\377")}, {0x10000000000ull, ByteSequence("\006\001\000\000\000\000\000")}, {0x10000000001ull, ByteSequence("\006\001\000\000\000\000\001")}, {0x1ffffffffffull, ByteSequence("\006\001\377\377\377\377\377")}, {0x20000000000ull, ByteSequence("\006\002\000\000\000\000\000")}, {0x20000000001ull, ByteSequence("\006\002\000\000\000\000\001")}, {0x3ffffffffffull, ByteSequence("\006\003\377\377\377\377\377")}, {0x40000000000ull, ByteSequence("\006\004\000\000\000\000\000")}, {0x40000000001ull, ByteSequence("\006\004\000\000\000\000\001")}, {0x7ffffffffffull, ByteSequence("\006\007\377\377\377\377\377")}, {0x80000000000ull, ByteSequence("\006\010\000\000\000\000\000")}, {0x80000000001ull, ByteSequence("\006\010\000\000\000\000\001")}, {0xfffffffffffull, ByteSequence("\006\017\377\377\377\377\377")}, {0x100000000000ull, ByteSequence("\006\020\000\000\000\000\000")}, {0x100000000001ull, ByteSequence("\006\020\000\000\000\000\001")}, {0x1fffffffffffull, ByteSequence("\006\037\377\377\377\377\377")}, {0x200000000000ull, ByteSequence("\006 \000\000\000\000\000")}, {0x200000000001ull, ByteSequence("\006 \000\000\000\000\001")}, {0x3fffffffffffull, 
ByteSequence("\006?\377\377\377\377\377")}, {0x400000000000ull, ByteSequence("\006@\000\000\000\000\000")}, {0x400000000001ull, ByteSequence("\006@\000\000\000\000\001")}, {0x7fffffffffffull, ByteSequence("\006\177\377\377\377\377\377")}, {0x800000000000ull, ByteSequence("\006\200\000\000\000\000\000")}, {0x800000000001ull, ByteSequence("\006\200\000\000\000\000\001")}, {0xffffffffffffull, ByteSequence("\006\377\377\377\377\377\377")}, {0x1000000000000ull, ByteSequence("\007\001\000\000\000\000\000\000")}, {0x1000000000001ull, ByteSequence("\007\001\000\000\000\000\000\001")}, {0x1ffffffffffffull, ByteSequence("\007\001\377\377\377\377\377\377")}, {0x2000000000000ull, ByteSequence("\007\002\000\000\000\000\000\000")}, {0x2000000000001ull, ByteSequence("\007\002\000\000\000\000\000\001")}, {0x3ffffffffffffull, ByteSequence("\007\003\377\377\377\377\377\377")}, {0x4000000000000ull, ByteSequence("\007\004\000\000\000\000\000\000")}, {0x4000000000001ull, ByteSequence("\007\004\000\000\000\000\000\001")}, {0x7ffffffffffffull, ByteSequence("\007\007\377\377\377\377\377\377")}, {0x8000000000000ull, ByteSequence("\007\010\000\000\000\000\000\000")}, {0x8000000000001ull, ByteSequence("\007\010\000\000\000\000\000\001")}, {0xfffffffffffffull, ByteSequence("\007\017\377\377\377\377\377\377")}, {0x10000000000000ull, ByteSequence("\007\020\000\000\000\000\000\000")}, {0x10000000000001ull, ByteSequence("\007\020\000\000\000\000\000\001")}, {0x1fffffffffffffull, ByteSequence("\007\037\377\377\377\377\377\377")}, {0x20000000000000ull, ByteSequence("\007 \000\000\000\000\000\000")}, {0x20000000000001ull, ByteSequence("\007 \000\000\000\000\000\001")}, {0x3fffffffffffffull, ByteSequence("\007?\377\377\377\377\377\377")}, {0x40000000000000ull, ByteSequence("\007@\000\000\000\000\000\000")}, {0x40000000000001ull, ByteSequence("\007@\000\000\000\000\000\001")}, {0x7fffffffffffffull, ByteSequence("\007\177\377\377\377\377\377\377")}, {0x80000000000000ull, ByteSequence("\007\200\000\000\000\000\000\000")}, {0x80000000000001ull, ByteSequence("\007\200\000\000\000\000\000\001")}, {0xffffffffffffffull, ByteSequence("\007\377\377\377\377\377\377\377")}, {0x100000000000000ull, ByteSequence("\010\001\000\000\000\000\000\000\000")}, {0x100000000000001ull, ByteSequence("\010\001\000\000\000\000\000\000\001")}, {0x1ffffffffffffffull, ByteSequence("\010\001\377\377\377\377\377\377\377")}, {0x200000000000000ull, ByteSequence("\010\002\000\000\000\000\000\000\000")}, {0x200000000000001ull, ByteSequence("\010\002\000\000\000\000\000\000\001")}, {0x3ffffffffffffffull, ByteSequence("\010\003\377\377\377\377\377\377\377")}, {0x400000000000000ull, ByteSequence("\010\004\000\000\000\000\000\000\000")}, {0x400000000000001ull, ByteSequence("\010\004\000\000\000\000\000\000\001")}, {0x7ffffffffffffffull, ByteSequence("\010\007\377\377\377\377\377\377\377")}, {0x800000000000000ull, ByteSequence("\010\010\000\000\000\000\000\000\000")}, {0x800000000000001ull, ByteSequence("\010\010\000\000\000\000\000\000\001")}, {0xfffffffffffffffull, ByteSequence("\010\017\377\377\377\377\377\377\377")}, {0x1000000000000000ull, ByteSequence("\010\020\000\000\000\000\000\000\000")}, {0x1000000000000001ull, ByteSequence("\010\020\000\000\000\000\000\000\001")}, {0x1fffffffffffffffull, ByteSequence("\010\037\377\377\377\377\377\377\377")}, {0x2000000000000000ull, ByteSequence("\010 \000\000\000\000\000\000\000")}, {0x2000000000000001ull, ByteSequence("\010 \000\000\000\000\000\000\001")}, {0x3fffffffffffffffull, 
ByteSequence("\010?\377\377\377\377\377\377\377")}, {0x4000000000000000ull, ByteSequence("\010@\000\000\000\000\000\000\000")}, {0x4000000000000001ull, ByteSequence("\010@\000\000\000\000\000\000\001")}, {0x7fffffffffffffffull, ByteSequence("\010\177\377\377\377\377\377\377\377")}, {0x8000000000000000ull, ByteSequence("\010\200\000\000\000\000\000\000\000")}, {0x8000000000000001ull, ByteSequence("\010\200\000\000\000\000\000\000\001")}, }; for (const auto& t : data) { uint64 num = t.first; string result; OrderedCode::WriteNumIncreasing(&result, num); EXPECT_EQ(t.second, result) << std::hex << num; StringPiece in = result; uint64 decoded; EXPECT_TRUE(OrderedCode::ReadNumIncreasing(&in, &decoded)); EXPECT_EQ(num, decoded); EXPECT_EQ("", in); } }
#include "tensorflow/lite/toco/import_tensorflow.h" #include <memory> #include <string> #include <utility> #include <vector> #include "google/protobuf/map.h" #include "google/protobuf/text_format.h" #include "absl/memory/memory.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/strip.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tensorflow/lite/toco/model.h" #include "tensorflow/lite/toco/model_flags.pb.h" #include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h" #include "tensorflow/lite/toco/tensorflow_util.h" #include "tensorflow/lite/toco/tooling_util.h" using tensorflow::AttrValue; using tensorflow::DT_BOOL; using tensorflow::DT_COMPLEX64; using tensorflow::DT_FLOAT; using tensorflow::DT_INT16; using tensorflow::DT_INT32; using tensorflow::DT_INT64; using tensorflow::DT_QUINT8; using tensorflow::DT_STRING; using tensorflow::DT_UINT16; using tensorflow::DT_UINT32; using tensorflow::DT_UINT8; using tensorflow::GraphDef; using tensorflow::NodeDef; using tensorflow::TensorProto; using tensorflow::TensorShapeProto; namespace toco { namespace { bool HasAttr(const NodeDef& node, const std::string& attr_name) { return node.attr().count(attr_name) > 0; } bool HasWildcardDimension(const TensorShapeProto& shape) { for (const auto& dim : shape.dim()) { if (dim.size() == -1) return true; } return false; } const std::string& GetStringAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kS); return attr.s(); } int64_t GetIntAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)) << attr_name << " not found in:\n" << node.DebugString(); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kI); return attr.i(); } float GetFloatAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kF); return attr.f(); } bool GetBoolAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kB); return attr.b(); } tensorflow::DataType GetDataTypeAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kType); return attr.type(); } const TensorShapeProto& GetShapeAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& 
attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kShape); return attr.shape(); } const TensorProto& GetTensorAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)) << "No attr named '" << attr_name << "'"; const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kTensor); return attr.tensor(); } const AttrValue::ListValue& GetListAttr(const NodeDef& node, const std::string& attr_name) { CHECK(HasAttr(node, attr_name)); const auto& attr = node.attr().at(attr_name); CHECK_EQ(attr.value_case(), AttrValue::kList); return attr.list(); } tensorflow::Status CheckOptionalAttr(const NodeDef& node, const std::string& attr_name, const std::string& expected_value) { if (HasAttr(node, attr_name)) { const std::string& value = GetStringAttr(node, attr_name); if (value != expected_value) { return tensorflow::errors::InvalidArgument( "Unexpected value for attribute '" + attr_name + "'. Expected '" + expected_value + "'"); } } return absl::OkStatus(); } tensorflow::Status CheckOptionalAttr( const NodeDef& node, const std::string& attr_name, const tensorflow::DataType& expected_value) { if (HasAttr(node, attr_name)) { const tensorflow::DataType& value = GetDataTypeAttr(node, attr_name); if (value != expected_value) { return tensorflow::errors::InvalidArgument( "Unexpected value for attribute '" + attr_name + "'. Expected '" + tensorflow::DataType_Name(expected_value) + "'"); } } return absl::OkStatus(); } template <typename T1, typename T2> tensorflow::Status ExpectValue(const T1& v1, const T2& v2, const std::string& description) { if (v1 == v2) return absl::OkStatus(); return tensorflow::errors::InvalidArgument(absl::StrCat( "Unexpected ", description, ": got ", v1, ", expected ", v2)); } ArrayDataType ConvertDataType(tensorflow::DataType dtype) { if (dtype == DT_UINT8) return ArrayDataType::kUint8; else if (dtype == DT_FLOAT) return ArrayDataType::kFloat; else if (dtype == DT_BOOL) return ArrayDataType::kBool; else if (dtype == DT_INT16) return ArrayDataType::kInt16; else if (dtype == DT_UINT16) return ArrayDataType::kUint16; else if (dtype == DT_INT32) return ArrayDataType::kInt32; else if (dtype == DT_UINT32) return ArrayDataType::kUint32; else if (dtype == DT_INT64) return ArrayDataType::kInt64; else if (dtype == DT_STRING) return ArrayDataType::kString; else if (dtype == DT_COMPLEX64) return ArrayDataType::kComplex64; else LOG(INFO) << "Unsupported data type in placeholder op: " << dtype; return ArrayDataType::kNone; } tensorflow::Status ImportShape( const TFLITE_PROTO_NS::RepeatedPtrField<tensorflow::TensorShapeProto_Dim>& input_dims, int* input_flat_size, Shape* shape) { std::vector<int> input_dims_only_sizes; bool zero_sized_shape = false; for (auto& d : input_dims) { if (d.size() > std::numeric_limits<int>::max()) { return tensorflow::errors::InvalidArgument("Shape element overflows"); } if (d.size() == 0) { zero_sized_shape = true; } input_dims_only_sizes.push_back(d.size()); } if (zero_sized_shape) { shape->mutable_dims()->clear(); if (input_flat_size != nullptr) *input_flat_size = 0; return absl::OkStatus(); } *shape->mutable_dims() = input_dims_only_sizes; if (input_flat_size == nullptr) return absl::OkStatus(); return NumElements(input_dims_only_sizes, input_flat_size); } template <typename T> struct TensorTraits; template <> struct TensorTraits<float> { static int size(const TensorProto& p) { return p.float_val_size(); } static float get(const TensorProto& p, int i) { return p.float_val(i); } static 
std::string accessor_name() { return "float_val"; } static std::string type_name() { return "float"; } static void CopyFromContent(const TensorProto& p, std::vector<float>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<uint8_t> { static int size(const TensorProto& p) { return p.int_val_size(); } static uint8_t get(const TensorProto& p, int i) { return p.int_val(i); } static std::string accessor_name() { return "int_val"; } static std::string type_name() { return "uint8"; } static void CopyFromContent(const TensorProto& p, std::vector<uint8_t>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<std::complex<float>> { static int size(const TensorProto& p) { return p.scomplex_val_size() / 2; } static std::complex<float> get(const TensorProto& p, int i) { return std::complex<float>(p.scomplex_val(2 * i), p.scomplex_val(2 * i + 1)); } static std::string accessor_name() { return "scomplex_val"; } static std::string type_name() { return "complex64"; } static void CopyFromContent(const TensorProto& p, std::vector<std::complex<float>>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<int32> { static int size(const TensorProto& p) { return p.int_val_size(); } static int32 get(const TensorProto& p, int i) { return p.int_val(i); } static std::string accessor_name() { return "int_val"; } static std::string type_name() { return "int32"; } static void CopyFromContent(const TensorProto& p, std::vector<int32>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<uint32> { static int size(const TensorProto& p) { return p.uint32_val_size(); } static int32 get(const TensorProto& p, int i) { return p.uint32_val(i); } static std::string accessor_name() { return "uint32_val"; } static std::string type_name() { return "uint32"; } static void CopyFromContent(const TensorProto& p, std::vector<uint32>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<int64_t> { static int size(const TensorProto& p) { return p.int64_val_size(); } static int64_t get(const TensorProto& p, int i) { return p.int64_val(i); } static std::string accessor_name() { return "int64_val"; } static std::string type_name() { return "int64"; } static void CopyFromContent(const TensorProto& p, std::vector<int64_t>* data) { toco::port::CopyToBuffer(p.tensor_content(), reinterpret_cast<char*>(data->data())); } }; template <> struct TensorTraits<bool> { static int size(const TensorProto& p) { return p.bool_val_size(); } static bool get(const TensorProto& p, int i) { return p.bool_val(i); } static std::string accessor_name() { return "bool_val"; } static std::string type_name() { return "bool"; } static void CopyFromContent(const TensorProto& p, std::vector<bool>* data) { std::vector<char> buf(p.tensor_content().size()); toco::port::CopyToBuffer(p.tensor_content(), buf.data()); for (int i = 0; i < p.tensor_content().size(); i++) { (*data)[i] = static_cast<bool>(buf[i]); } } }; template <typename T> tensorflow::Status ImportTensorData(const TensorProto& input_tensor, int input_flat_size, std::vector<T>* output_data) { CHECK_GE(output_data->size(), input_flat_size); int num_elements_in_tensor = TensorTraits<T>::size(input_tensor); if (num_elements_in_tensor == 
input_flat_size) { for (int i = 0; i < num_elements_in_tensor; i++) { (*output_data)[i] = TensorTraits<T>::get(input_tensor, i); } } else if (input_tensor.tensor_content().size() == input_flat_size * sizeof(T)) { TensorTraits<T>::CopyFromContent(input_tensor, output_data); } else if (num_elements_in_tensor >= 0 && num_elements_in_tensor < input_flat_size) { int i = 0; for (; i < num_elements_in_tensor; ++i) { (*output_data)[i] = TensorTraits<T>::get(input_tensor, i); } auto last = i == 0 ? T(0) : (*output_data)[i - 1]; for (; i < input_flat_size; ++i) { (*output_data)[i] = last; } } else { std::string accessor_name = TensorTraits<T>::accessor_name(); std::string type_name = TensorTraits<T>::type_name(); return tensorflow::errors::InvalidArgument( absl::StrCat("Neither input_content (", input_tensor.tensor_content().size() / sizeof(T), ") nor ", accessor_name, " (", num_elements_in_tensor, ") have the right dimensions (", input_flat_size, ") for this ", type_name, " tensor")); } return absl::OkStatus(); } tensorflow::Status ImportFloatArray(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_FLOAT); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_float_data = output_array->GetMutableBuffer<ArrayDataType::kFloat>().data; output_float_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0.f); return ImportTensorData<float>(input_tensor, input_flat_size, &output_float_data); } tensorflow::Status ImportComplex64Array(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_COMPLEX64); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 4); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_complex_data = output_array->GetMutableBuffer<ArrayDataType::kComplex64>().data; output_complex_data.resize(RequiredBufferSizeForShape(output_array->shape()), std::complex<float>(0.f, 0.f)); return ImportTensorData<std::complex<float>>(input_tensor, input_flat_size, &output_complex_data); } tensorflow::Status ImportQuint8Array(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_QUINT8); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_int_data = output_array->GetMutableBuffer<ArrayDataType::kUint8>().data; output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0); return ImportTensorData<uint8_t>(input_tensor, input_flat_size, &output_int_data); } tensorflow::Status ImportInt32Array(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_INT32); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_int_data = output_array->GetMutableBuffer<ArrayDataType::kInt32>().data; output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0); return ImportTensorData<int32>(input_tensor, 
input_flat_size, &output_int_data); } tensorflow::Status ImportUint32Array(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_UINT32); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_int_data = output_array->GetMutableBuffer<ArrayDataType::kUint32>().data; output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0); return ImportTensorData<uint32>(input_tensor, input_flat_size, &output_int_data); } tensorflow::Status ImportInt64Array(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_INT64); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_int_data = output_array->GetMutableBuffer<ArrayDataType::kInt64>().data; output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0); return ImportTensorData<int64_t>(input_tensor, input_flat_size, &output_int_data); } tensorflow::Status ImportBoolArray(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_BOOL); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; auto& output_bool_data = output_array->GetMutableBuffer<ArrayDataType::kBool>().data; output_bool_data.resize(RequiredBufferSizeForShape(output_array->shape()), false); status = ImportTensorData<bool>(input_tensor, input_flat_size, &output_bool_data); if (!status.ok() && output_bool_data.size() == 1) { output_bool_data[0] = false; return absl::OkStatus(); } return status; } tensorflow::Status ImportStringArray(const TensorProto& input_tensor, Array* output_array) { CHECK_EQ(input_tensor.dtype(), DT_STRING); const auto& input_shape = input_tensor.tensor_shape(); CHECK_LE(input_shape.dim_size(), 6); int input_flat_size; auto status = ImportShape(input_shape.dim(), &input_flat_size, output_array->mutable_shape()); if (!status.ok()) return status; if (input_flat_size != input_tensor.string_val_size()) { return tensorflow::errors::InvalidArgument( "Input_content string_val doesn't have the right dimensions " "for this string tensor"); } auto& output_string_data = output_array->GetMutableBuffer<ArrayDataType::kString>().data; output_string_data.resize(RequiredBufferSizeForShape(output_array->shape())); CHECK_GE(output_string_data.size(), input_flat_size); for (int i = 0; i < input_flat_size; ++i) { output_string_data[i] = input_tensor.string_val(i); } return absl::OkStatus(); } int GetInputsCount(const NodeDef& node, const TensorFlowImportFlags& tf_import_flags) { if (tf_import_flags.drop_control_dependency) { for (size_t i = 0; i < node.input_size(); ++i) { if (node.input(i)[0] == '^') { return i; } } } return node.input_size(); } tensorflow::Status CheckInputsCount( const NodeDef& node, const TensorFlowImportFlags& tf_import_flags, int expected_input_count) { if (GetInputsCount(node, tf_import_flags) != expected_input_count) { return tensorflow::errors::FailedPrecondition( node.op(), " node expects ", expected_input_count, " input(s) other than control 
dependencies: ", node.DebugString()); } return absl::OkStatus(); } template <ArrayDataType T> std::string CreateConstArray( Model* model, std::string const& name, std::vector<typename toco::DataType<T>> const& data) { std::string array_name = toco::AvailableArrayName(*model, name); auto& array = model->GetOrCreateArray(array_name); array.data_type = T; array.mutable_shape()->mutable_dims()->emplace_back( static_cast<int>(data.size())); array.GetMutableBuffer<T>().data = data; return array_name; } void RetainTensorFlowNodeDef(const NodeDef& node, Operator* op) { node.SerializeToString(&op->tensorflow_node_def); } void GetOutputNamesFromNodeDef(const NodeDef& node, const tensorflow::OpDef& op_def, TensorFlowUnsupportedOperator* op) { int next_output = 0; auto add_output = [&node, &next_output, op]() { if (next_output == 0) { op->outputs.push_back(node.name()); } else { op->outputs.push_back(absl::StrCat(node.name(), ":", next_output)); } ++next_output; }; for (int i = 0; i < op_def.output_arg_size(); ++i) { std::string multiples = op_def.output_arg(i).number_attr(); if (!multiples.empty()) { CHECK(HasAttr(node, multiples)) << "No attr named " << multiples; int num_outputs = GetIntAttr(node, multiples); for (int j = 0; j < num_outputs; ++j) { add_output(); } } else { std::string list = op_def.output_arg(i).type_list_attr(); if (!list.empty()) { CHECK(HasAttr(node, list)) << "No attr named " << list; const AttrValue::ListValue& list_value = GetListAttr(node, list); for (int j = 0; j < list_value.type_size(); ++j) { add_output(); } } else { add_output(); } } } } void GetOutputTypesFromNodeDef(const NodeDef& node, const tensorflow::OpDef& op_def, TensorFlowUnsupportedOperator* op) { auto add_type = [&node, op](tensorflow::DataType type) { if (type == tensorflow::DT_INVALID) { LOG(WARNING) << "Op node missing output type attribute: " << node.name(); op->output_data_types.clear(); } else { op->output_data_types.push_back(ConvertDataType(type)); } }; auto get_type = [&node](const tensorflow::OpDef::ArgDef& a) { if (a.type() != tensorflow::DT_INVALID) { return a.type(); } else if (HasAttr(node, a.type_attr())) { return GetDataTypeAttr(node, a.type_attr()); } else { return tensorflow::DT_INVALID; } }; for (int i = 0; i < op_def.output_arg_size(); ++i) { std::string multiples = op_def.output_arg(i).number_attr(); if (!multiples.empty()) { CHECK(HasAttr(node, multiples)) << "No attr named " << multiples; int num_outputs = GetIntAttr(node, multiples); auto type = get_type(op_def.output_arg(i)); for (int j = 0; j < num_outputs; ++j) { add_type(type); } } else { std::string list = op_def.output_arg(i).type_list_attr(); if (!list.empty()) { CHECK(HasAttr(node, list)) << "No attr named " << list; const AttrValue::ListValue& list_value = GetListAttr(node, list); for (int j = 0; j < list_value.type_size(); ++j) { add_type(list_value.type(j)); } } else { add_type(get_type(op_def.output_arg(i))); } } } } tensorflow::Status ConvertUnsupportedOperator( const NodeDef& node, const TensorFlowImportFlags& tf_import_flags, const ModelFlags& model_flags, Model* model) { static constexpr char kAttrOutputQuantized[] = "_output_quantized"; static constexpr char kAttrOutputTypes[] = "_output_types"; static constexpr char kAttrOutputShapes[] = "_output_shapes"; static constexpr char kAttrSupportOutputTypeFloatInQuantizedOp[] = "_support_output_type_float_in_quantized_op"; LOG(INFO) << "Converting unsupported operation: " << node.op(); auto* op = new TensorFlowUnsupportedOperator; op->tensorflow_op = node.op(); 
RetainTensorFlowNodeDef(node, op); model->operators.emplace_back(op); const int num_inputs = GetInputsCount(node, tf_import_flags); for (int i = 0; i < num_inputs; ++i) { op->inputs.push_back(node.input(i)); } const tensorflow::OpDef* op_def = nullptr; if (tensorflow::OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) { GetOutputNamesFromNodeDef(node, *op_def, op); } else { op->outputs.push_back(node.name()); } if (HasAttr(node, kAttrOutputQuantized)) { op->quantized = GetBoolAttr(node, kAttrOutputQuantized); } if (HasAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp)) { op->support_output_type_float_in_quantized_op = GetBoolAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp); } if (HasAttr(node, kAttrOutputTypes)) { const auto& output_types = GetListAttr(node, kAttrOutputTypes); for (int i = 0; i < output_types.type_size(); ++i) { op->output_data_types.push_back(ConvertDataType(output_types.type(i))); } } else if (HasAttr(node, "Tout")) { const auto& output_type = GetDataTypeAttr(node, "Tout"); op->output_data_types.push_back(ConvertDataType(output_type)); } else if (op_def != nullptr) { GetOutputTypesFromNodeDef(node, *op_def, op); } else { LOG(INFO) << "Unable to determine output type for op: " << node.op(); } if (HasAttr(node, kAttrOutputShapes)) { const auto& output_shapes = GetListAttr(node, kAttrOutputShapes); Shape output_shape; for (int i = 0; i < output_shapes.shape_size(); ++i) { const auto& shape = output_shapes.shape(i); if (HasWildcardDimension(shape)) { LOG(INFO) << "Skipping wildcard output shape(s) for node: " << node.name(); op->output_shapes.clear(); break; } const auto status = ImportShape(shape.dim(), nullptr, &output_shape); if (!status.ok()) { return status; } op->output_shapes.push_back(output_shape); } } return absl::OkStatus(); } tensorflow::Status ConvertConstOperator( const NodeDef& node, const TensorFlowImportFlags& tf_import_flags, const ModelFlags& model_flags, Model* model) { CHECK_EQ(node.op(), "Const"); const auto& tensor = GetTensorAttr(node, "value"); const auto dtype = GetDataTypeAttr(node, "dtype"); tensorflow::Status status = absl::OkStatus(); auto& array = model->GetOrCreateArray(node.name()); switch (dtype) { case DT_FLOAT: array.data_type = ArrayDataType::kFloat; status = ImportFloatArray(tensor, &array); break; case DT_INT32: array.data_type = ArrayDataType::kInt32; status = ImportInt32Array(tensor, &array); break; case DT_UINT32: array.data_type = ArrayDataType::kUint32; status = ImportUint32Array(tensor, &array); break; case DT_QUINT8: array.data_type = ArrayDataType::kUint8; status = ImportQuint8Array(tensor, &array); break; case DT_INT64: array.data_type = ArrayDataType::kInt64; status = ImportInt64Array(tensor, &array); break; case DT_STRING: array.data_type = ArrayDataType::kString; status = ImportStringArray(tensor, &array); break; case DT_BOOL: array.data_type = ArrayDataType::kBool; status = ImportBoolArray(tensor, &array); break; case DT_COMPLEX64: array.data_type = ArrayDataType::kComplex64; status = ImportComplex64Array(tensor, &array); break; default: array.data_type = ArrayDataType::kNone; array.GetMutableBuffer<ArrayDataType::kNone>(); break; } TF_RETURN_WITH_CONTEXT_IF_ERROR( status, " (while processing node '" + node.name() + "')"); return absl::OkStatus(); } tensorflow::Status ConvertConvOperator( const NodeDef& node, const TensorFlowImportFlags& tf_import
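The const-import helpers above size the output buffer from the shape and then copy whatever payload the TensorProto carries; the content tests in the row below pin down the fill policy: a short payload repeats its last element, and an empty payload leaves the zero-initialized buffer untouched. A minimal standalone sketch of that policy, with a hypothetical FillFromShortData helper (not the real ImportTensorData signature):

#include <algorithm>
#include <cassert>
#include <vector>

// Hypothetical helper mirroring the fill policy the import tests exercise:
// - payload long enough: copy the first `size` elements;
// - payload shorter but non-empty: copy it, then repeat the last element;
// - payload empty: keep the zero-initialized buffer (like resize(..., 0)).
template <typename T>
std::vector<T> FillFromShortData(const std::vector<T>& payload, int size) {
  std::vector<T> out(size, T{});
  int n = std::min<int>(payload.size(), size);
  for (int i = 0; i < n; ++i) out[i] = payload[i];
  for (int i = n; i < size && n > 0; ++i) out[i] = payload[n - 1];
  return out;
}

int main() {
  assert((FillFromShortData<int>({1, 2, 3, 4, 5}, 6) ==
          std::vector<int>{1, 2, 3, 4, 5, 5}));  // last element repeated
  assert((FillFromShortData<int>({}, 3) == std::vector<int>{0, 0, 0}));
  return 0;
}

Under this policy the Int32 content test's expectations fall out directly: dropping one of six elements yields 1, 2, 3, 4, 5, 5, and dropping all six yields zeros.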
#include "tensorflow/lite/toco/import_tensorflow.h" #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/lite/testing/util.h" #include "tensorflow/lite/toco/toco_port.h" namespace toco { using tensorflow::AttrValue; using tensorflow::DT_BOOL; using tensorflow::DT_COMPLEX64; using tensorflow::DT_FLOAT; using tensorflow::DT_INT32; using tensorflow::DT_INT64; using tensorflow::DT_INVALID; using tensorflow::DT_QUINT8; using tensorflow::DT_STRING; using tensorflow::DT_UINT16; using tensorflow::DT_UINT32; using tensorflow::NodeDef; using tensorflow::Status; using ::testing::ElementsAre; namespace internal { using ConverterType = tensorflow::Status (*)( const NodeDef& node, const TensorFlowImportFlags& tf_import_flags, const ModelFlags& model_flags, Model* model); using ConverterMapType = std::unordered_map<std::string, ConverterType>; ConverterMapType GetTensorFlowNodeConverterMap(); ConverterMapType GetTensorFlowNodeConverterMapForFlex(); Status ImportTensorFlowNode(const NodeDef&, const TensorFlowImportFlags&, const ModelFlags& model_flags, Model*, const ConverterMapType&); } namespace { Status ImportNode(const NodeDef& node, Model* model) { const auto converter = internal::GetTensorFlowNodeConverterMap(); return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(), ModelFlags(), model, converter); } Status ImportFlexNode(const NodeDef& node, Model* model) { const auto converter = internal::ConverterMapType(); return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(), ModelFlags(), model, converter); } Status ImportNode(const NodeDef& node) { Model model; return ImportNode(node, &model); } NodeDef BuildNode( const std::string& op, const std::vector<std::initializer_list<int>>& output_shapes) { NodeDef node; node.set_op(op); node.set_name("Node1"); node.add_input(); node.set_input(0, "Node0"); AttrValue::ListValue* shapes = (*node.mutable_attr())["_output_shapes"].mutable_list(); for (const auto& output_shape : output_shapes) { tensorflow::TensorShapeProto* shape = shapes->add_shape(); for (int64_t output_shape_dim : output_shape) { auto shape_dim = shape->add_dim(); shape_dim->set_size(output_shape_dim); } } return node; } namespace { void BuildConstNode(std::initializer_list<int64_t> shape, tensorflow::DataType dtype, int64_t num_elements, NodeDef* node) { node->set_op("Const"); node->set_name("Node1"); AttrValue dtype_attr; SetAttrValue(dtype, &dtype_attr); (*node->mutable_attr())["dtype"] = dtype_attr; tensorflow::TensorProto t; t.set_dtype(dtype); auto* s = t.mutable_tensor_shape(); for (auto d : shape) { s->add_dim()->set_size(d); } switch (dtype) { case DT_FLOAT: for (int64_t i = 0; i < num_elements; ++i) { t.add_float_val(i / 10000.0 + 1); } break; case DT_INT32: for (int64_t i = 0; i < num_elements; ++i) { t.add_int_val(i % std::numeric_limits<int>::max() + 1); } break; case DT_UINT32: for (int64_t i = 0; i < num_elements; ++i) { t.add_int_val(i % std::numeric_limits<uint32_t>::max() + 1); } break; case DT_QUINT8: for (int64_t i = 0; i < num_elements; ++i) { t.add_int_val(i % std::numeric_limits<uint8_t>::max() + 1); } break; case DT_INT64: for (int64_t i = 
0; i < num_elements; ++i) { t.add_int64_val(i + 1); } break; case DT_UINT16: for (int64_t i = 0; i < num_elements; ++i) { t.add_int_val(i % std::numeric_limits<uint16_t>::max() + 1); } break; case DT_STRING: break; case DT_BOOL: for (int64_t i = 0; i < num_elements; ++i) { t.add_bool_val((i % 2) == 0); } break; case DT_COMPLEX64: for (int64_t i = 0; i < num_elements; ++i) { t.add_scomplex_val(i / 10000.0 + 1); t.add_scomplex_val(-i / 10000.0 - 1); } break; default: break; } AttrValue value_attr; SetAttrValue(t, &value_attr); (*node->mutable_attr())["value"] = value_attr; } } TEST(FlexImportTest, ConditionalConst) { Model model; auto build_and_import_node = [&model](const std::string& name, std::initializer_list<int64_t> shape, tensorflow::DataType dtype, int64_t num_elements) { NodeDef node; BuildConstNode(shape, dtype, num_elements, &node); node.set_name(name); const auto converter = internal::GetTensorFlowNodeConverterMapForFlex(); return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(), ModelFlags(), &model, converter); }; EXPECT_TRUE(build_and_import_node("Known", {1, 2, 3}, DT_INT32, 6).ok()); EXPECT_TRUE(build_and_import_node("BadType", {1, 2, 3}, DT_INVALID, 6).ok()); EXPECT_TRUE(build_and_import_node("Unknown", {1, -2, 3}, DT_INT32, 6).ok()); EXPECT_EQ(model.operators.size(), 2); EXPECT_TRUE(model.HasArray("Known")); EXPECT_FALSE(model.HasArray("Unknown")); EXPECT_FALSE(model.HasArray("BadType")); } TEST(FlexImportTest, SoftmaxWithBeta) { NodeDef node; node.set_op("Softmax"); node.set_name("softmax"); node.add_input(); node.set_input(0, "logits"); AttrValue dtype_attr; SetAttrValue(0.5, &dtype_attr); (*node.mutable_attr())["_softmax_beta"] = dtype_attr; Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax); const SoftmaxOperator* op = static_cast<const SoftmaxOperator*>(model.operators[0].get()); EXPECT_EQ(op->beta, 0.5); } TEST(FlexImportTest, SoftmaxWithoutBeta) { NodeDef node; node.set_op("Softmax"); node.set_name("softmax"); node.add_input(); node.set_input(0, "logits"); Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax); const SoftmaxOperator* op = static_cast<const SoftmaxOperator*>(model.operators[0].get()); EXPECT_EQ(op->beta, 1.0); } class ShapeImportTest : public ::testing::TestWithParam<tensorflow::DataType> { }; TEST_P(ShapeImportTest, ShapeElementIsNegative) { NodeDef node; BuildConstNode({1, -2, 10}, GetParam(), 0, &node); auto status = ImportNode(node); EXPECT_EQ( status.message(), "Tensor shape should not include negative values\n\t (while processing " "node 'Node1')"); } TEST_P(ShapeImportTest, ShapeElementIsZero) { NodeDef node; BuildConstNode({1, 0, 10}, GetParam(), 0, &node); Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); const auto& array = model.GetArray("Node1"); EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre()); } TEST_P(ShapeImportTest, ShapeIsOneDimZero) { NodeDef node; BuildConstNode({0}, GetParam(), 0, &node); Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); const auto& array = model.GetArray("Node1"); EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre()); } TEST_P(ShapeImportTest, ShapeElementTooLarge) { NodeDef node; BuildConstNode({3000000000}, GetParam(), 0, &node); auto status = ImportNode(node); EXPECT_EQ(status.message(), "Shape element overflows\n\t (while processing 
node 'Node1')"); } TEST_P(ShapeImportTest, ShapeTooLarge) { NodeDef node; BuildConstNode({1000000, 2000000, 2000000, 2000000}, GetParam(), 0, &node); auto status = ImportNode(node); EXPECT_EQ(status.message(), "Tensor shape is too large\n\t (while processing node 'Node1')"); } std::vector<tensorflow::DataType> TestTypes() { return {DT_FLOAT, DT_INT32, DT_INT64, DT_BOOL, DT_QUINT8, DT_COMPLEX64}; } INSTANTIATE_TEST_SUITE_P(ShapeImportTest, ShapeImportTest, ::testing::ValuesIn(TestTypes())); class ContentImportTest : public ::testing::Test { public: template <ArrayDataType T> std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) { Model model; auto status = ImportNode(node, &model); CHECK(status.ok()) << status.message(); const auto& array = model.GetArray("Node1"); return array.GetBuffer<T>().data; } void RemoveTrailingElements(NodeDef* node, int num) { tensorflow::TensorProto* p = node->mutable_attr()->at("value").mutable_tensor(); for (int i = 0; i < num; ++i) { if (p->int_val_size() > 0) p->mutable_int_val()->RemoveLast(); if (p->int64_val_size() > 0) p->mutable_int64_val()->RemoveLast(); if (p->float_val_size() > 0) p->mutable_float_val()->RemoveLast(); if (p->bool_val_size() > 0) p->mutable_bool_val()->RemoveLast(); if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast(); if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast(); } } }; TEST_F(ContentImportTest, Int32) { constexpr ArrayDataType kType = ArrayDataType::kInt32; NodeDef node; BuildConstNode({1, 2, 3}, DT_INT32, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5)); RemoveTrailingElements(&node, 4); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0)); } TEST_F(ContentImportTest, Int64) { constexpr ArrayDataType kType = ArrayDataType::kInt64; NodeDef node; BuildConstNode({1, 2, 3}, DT_INT64, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5)); RemoveTrailingElements(&node, 4); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0)); } TEST_F(ContentImportTest, Quint8) { constexpr ArrayDataType kType = ArrayDataType::kUint8; NodeDef node; BuildConstNode({1, 2, 3}, DT_QUINT8, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5)); RemoveTrailingElements(&node, 4); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0)); } TEST_F(ContentImportTest, Bool) { constexpr ArrayDataType kType = ArrayDataType::kBool; NodeDef node; BuildConstNode({1, 2, 3}, DT_BOOL, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 1)); RemoveTrailingElements(&node, 4); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1)); RemoveTrailingElements(&node, 1); 
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0)); } TEST_F(ContentImportTest, Float) { constexpr ArrayDataType kType = ArrayDataType::kFloat; NodeDef node; BuildConstNode({1, 2, 3}, DT_FLOAT, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0004)); RemoveTrailingElements(&node, 4); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000)); RemoveTrailingElements(&node, 1); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000)); } TEST_F(ContentImportTest, Complex64) { constexpr ArrayDataType kType = ArrayDataType::kComplex64; NodeDef node; BuildConstNode({1, 2, 3}, DT_COMPLEX64, 6, &node); using cplx = std::complex<float>; EXPECT_THAT( ImportAndGetData<kType>(node), ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001), cplx(1.0002, -1.0002), cplx(1.0003, -1.0003), cplx(1.0004, -1.0004), cplx(1.0005, -1.0005))); RemoveTrailingElements(&node, 1); EXPECT_THAT( ImportAndGetData<kType>(node), ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001), cplx(1.0002, -1.0002), cplx(1.0003, -1.0003), cplx(1.0004, -1.0004), cplx(1.0004, -1.0004))); RemoveTrailingElements(&node, 4); EXPECT_THAT( ImportAndGetData<kType>(node), ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0000, -1.0000), cplx(1.0000, -1.0000), cplx(1.0000, -1.0000), cplx(1.0000, -1.0000), cplx(1.0000, -1.0000))); RemoveTrailingElements(&node, 1); EXPECT_THAT( ImportAndGetData<kType>(node), ElementsAre(std::complex<float>(0.0000, 0.0000), cplx(0.0000, 0.0000), cplx(0.0000, 0.0000), cplx(0.0000, 0.0000), cplx(0.0000, 0.0000), cplx(0.0000, 0.0000))); } std::vector<std::pair<tensorflow::DataType, ArrayDataType>> UnaryTestTypes() { return {{DT_FLOAT, ArrayDataType::kFloat}, {DT_INT32, ArrayDataType::kInt32}, {DT_INT64, ArrayDataType::kInt64}}; } class TensorContentTest : public ::testing::Test { public: template <ArrayDataType T> std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) { Model model; auto status = ImportNode(node, &model); CHECK(status.ok()) << status.message(); const auto& nodearray = model.GetArray("Node1"); return nodearray.GetBuffer<T>().data; } template <class T> void NodeWithTensorContent(std::initializer_list<int64_t> shape, tensorflow::DataType dtype, int64_t num_elements, NodeDef* node) { node->set_op("Const"); node->set_name("Node1"); AttrValue dtype_attr; SetAttrValue(dtype, &dtype_attr); (*node->mutable_attr())["dtype"] = dtype_attr; auto allocated_content = std::make_unique<T[]>(num_elements); tensorflow::TensorProto t; t.set_dtype(dtype); auto* s = t.mutable_tensor_shape(); for (const auto& d : shape) { s->add_dim()->set_size(d); } switch (dtype) { case DT_FLOAT: for (int64_t i = 0; i < num_elements; ++i) { allocated_content[i] = i / 10000.0 + 1; } break; case DT_INT32: for (int64_t i = 0; i < num_elements; ++i) { allocated_content[i] = i % std::numeric_limits<int>::max() + 1; } break; case DT_QUINT8: for (int64_t i = 0; i < num_elements; ++i) { allocated_content[i] = i % std::numeric_limits<uint8_t>::max() + 1; } break; case DT_INT64: for (int64_t i = 0; i < num_elements; ++i) { allocated_content[i] = i + 1; } break; case DT_STRING: break; case DT_BOOL: for (int64_t i = 0; i < num_elements; ++i) { allocated_content[i] = ((i % 2) == 0); } break; 
default: break; } t.set_tensor_content( std::string(reinterpret_cast<const char*>(allocated_content.get()), num_elements * sizeof(T))); AttrValue value_attr; SetAttrValue(t, &value_attr); (*node->mutable_attr())["value"] = value_attr; allocated_content.reset(); } }; TEST_F(TensorContentTest, Int64) { constexpr ArrayDataType kType = ArrayDataType::kInt64; NodeDef node; NodeWithTensorContent<int64_t>({1, 2, 3}, DT_INT64, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); } TEST_F(TensorContentTest, Int32) { constexpr ArrayDataType kType = ArrayDataType::kInt32; NodeDef node; NodeWithTensorContent<int>({1, 2, 3}, DT_INT32, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); } TEST_F(TensorContentTest, Float) { constexpr ArrayDataType kType = ArrayDataType::kFloat; NodeDef node; NodeWithTensorContent<float>({1, 2, 3}, DT_FLOAT, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005)); } TEST_F(TensorContentTest, Quint8) { constexpr ArrayDataType kType = ArrayDataType::kUint8; NodeDef node; NodeWithTensorContent<uint8_t>({1, 2, 3}, DT_QUINT8, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6)); } TEST_F(TensorContentTest, Bool) { constexpr ArrayDataType kType = ArrayDataType::kBool; NodeDef node; NodeWithTensorContent<bool>({1, 2, 3}, DT_BOOL, 6, &node); EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0)); } class TypeImportTest : public ::testing::TestWithParam< std::pair<tensorflow::DataType, ArrayDataType>> { protected: TypeImportTest() {} void BuildUnaryNode(const std::string& op_name, tensorflow::DataType dtype, NodeDef* node) { node->set_op(op_name); node->set_name("Node1"); node->add_input(); node->set_input(0, "Node0"); AttrValue dtype_attr; SetAttrValue(dtype, &dtype_attr); (*node->mutable_attr())["T"] = dtype_attr; } }; TEST_P(TypeImportTest, BasicTypeInference) { NodeDef node; BuildUnaryNode("Atan", GetParam().first, &node); Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_THAT(op->output_data_types, ::testing::ElementsAre(GetParam().second)); } INSTANTIATE_TEST_SUITE_P(BasicTypeInference, TypeImportTest, ::testing::ValuesIn(UnaryTestTypes())); TEST(ImportTest, TypeInferenceWithFixedOutputType) { Model model; EXPECT_TRUE(ImportNode(BuildNode("IsFinite", {{1, 2}, {2, 3}}), &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_THAT(op->output_data_types, ::testing::ElementsAre(ArrayDataType::kBool)); } TEST(ImportTest, FailedTypeInference) { NodeDef node; node.set_op("Atan"); node.set_name("Node1"); node.add_input(); node.set_input(0, "Node0"); Model model; EXPECT_TRUE(ImportNode(node, &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_TRUE(op->output_data_types.empty()); } TEST(ImportTest, UnsupportedOpWithOutputShapes) { Model model; 
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{1, 2}, {2, 3}}), &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_EQ(op->output_shapes.size(), 2); ASSERT_THAT(op->output_shapes[0].dims(), ::testing::ElementsAre(1, 2)); ASSERT_THAT(op->output_shapes[1].dims(), ::testing::ElementsAre(2, 3)); } TEST(ImportTest, UnsupportedOpWithWildcardOutputShapes) { Model model; EXPECT_TRUE(ImportNode(BuildNode("Atan", {{-1, 2}}), &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_TRUE(op->output_shapes.empty()); } TEST(ImportTest, UnsupportedOpWithMultipleOutputs) { NodeDef node = BuildNode("ParseExample", {}); { AttrValue value_attr; SetAttrValue(2, &value_attr); (*node.mutable_attr())["Nsparse"] = value_attr; } { AttrValue value_attr; std::vector<tensorflow::DataType> types; types.push_back(tensorflow::DT_FLOAT); types.push_back(tensorflow::DT_STRING); SetAttrValue(types, &value_attr); (*node.mutable_attr())["sparse_types"] = value_attr; } { AttrValue value_attr; std::vector<tensorflow::DataType> types; types.push_back(tensorflow::DT_STRING); types.push_back(tensorflow::DT_FLOAT); types.push_back(tensorflow::DT_INT64); SetAttrValue(types, &value_attr); (*node.mutable_attr())["Tdense"] = value_attr; } Model model; EXPECT_TRUE(ImportFlexNode(node, &model).ok()); ASSERT_THAT(model.operators.size(), ::testing::Ge(1)); ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported); const TensorFlowUnsupportedOperator* op = static_cast<const TensorFlowUnsupportedOperator*>( model.operators[0].get()); ASSERT_EQ(op->outputs.size(), 9); ASSERT_EQ(op->output_data_types.size(), 9); ASSERT_EQ(op->outputs[0], "Node1"); ASSERT_EQ(op->outputs[1], "Node1:1"); ASSERT_EQ(op->output_data_types[0], ArrayDataType::kInt64); ASSERT_EQ(op->output_data_types[1], ArrayDataType::kInt64); ASSERT_EQ(op->outputs[2], "Node1:2"); ASSERT_EQ(op->outputs[3], "Node1:3"); ASSERT_EQ(op->output_data_types[2], ArrayDataType::kFloat); ASSERT_EQ(op->output_data_types[3], ArrayDataType::kString); ASSERT_EQ(op->outputs[4], "Node1:4"); ASSERT_EQ(op->outputs[5], "Node1:5"); ASSERT_EQ(op->output_data_types[4], ArrayDataType::kInt64); ASSERT_EQ(op->output_data_types[5], ArrayDataType::kInt64); ASSERT_EQ(op->outputs[6], "Node1:6"); ASSERT_EQ(op->outputs[7], "Node1:7"); ASSERT_EQ(op->outputs[8], "Node1:8"); ASSERT_EQ(op->output_data_types[6], ArrayDataType::kString); ASSERT_EQ(op->output_data_types[7], ArrayDataType::kFloat); ASSERT_EQ(op->output_data_types[8], ArrayDataType::kInt64); } } } int main(int argc, char** argv) { ::tflite::LogToStderr(); ::testing::InitGoogleTest(&argc, argv); ::toco::port::InitGoogleWasDoneElsewhere(); return RUN_ALL_TESTS(); }
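A note on the output naming these tests rely on: for unsupported ops, GetOutputNamesFromNodeDef (in the converter row above) emits one name per expanded OpDef output arg, keeping the bare node name for output 0 and appending ":<index>" afterwards, which is exactly what UnsupportedOpWithMultipleOutputs asserts ("Node1", "Node1:1", ...). A minimal sketch of just that rule, assuming a plain output count instead of the real NodeDef/OpDef plumbing:

#include <cassert>
#include <string>
#include <vector>

// Hypothetical stand-in for the naming loop: output 0 is the bare node name,
// output k (k > 0) is "<name>:<k>".
std::vector<std::string> OutputNames(const std::string& node_name,
                                     int num_outputs) {
  std::vector<std::string> names;
  for (int i = 0; i < num_outputs; ++i) {
    names.push_back(i == 0 ? node_name : node_name + ":" + std::to_string(i));
  }
  return names;
}

int main() {
  auto names = OutputNames("Node1", 3);
  assert(names[0] == "Node1" && names[1] == "Node1:1" && names[2] == "Node1:2");
  return 0;
}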
tensorflow::Status ImportFloatArray(const TensorProto& input_tensor,
                                    Array* output_array) {
  CHECK_EQ(input_tensor.dtype(), DT_FLOAT);
  const auto& input_shape = input_tensor.tensor_shape();
  CHECK_LE(input_shape.dim_size(), 6);
  int input_flat_size;
  auto status = ImportShape(input_shape.dim(), &input_flat_size,
                            output_array->mutable_shape());
  if (!status.ok()) return status;
  auto& output_float_data =
      output_array->GetMutableBuffer<ArrayDataType::kFloat>().data;
  output_float_data.resize(RequiredBufferSizeForShape(output_array->shape()),
                           0.f);
  return ImportTensorData<float>(input_tensor, input_flat_size,
                                 &output_float_data);
}
TEST_F(ContentImportTest, Float) {
  constexpr ArrayDataType kType = ArrayDataType::kFloat;
  NodeDef node;
  BuildConstNode({1, 2, 3}, DT_FLOAT, 6, &node);
  EXPECT_THAT(ImportAndGetData<kType>(node),
              ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
  RemoveTrailingElements(&node, 1);
  EXPECT_THAT(ImportAndGetData<kType>(node),
              ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0004));
  RemoveTrailingElements(&node, 4);
  EXPECT_THAT(ImportAndGetData<kType>(node),
              ElementsAre(1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000));
  RemoveTrailingElements(&node, 1);
  EXPECT_THAT(ImportAndGetData<kType>(node),
              ElementsAre(0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000));
}
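The shape errors asserted above ("Tensor shape should not include negative values", "Shape element overflows", "Tensor shape is too large") all come out of the ImportShape step that runs before buffer allocation. A sketch of equivalent validation, under the assumption that ImportShape rejects negative dims, dims that do not fit in int, and products that do not fit in int (FlatSize is an illustrative name, and it collapses the distinct error messages into a single failure):

#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>
#include <vector>

// Illustrative shape validation hitting the same three failure modes as the
// ShapeImportTest cases: negative dim, per-element overflow, product overflow.
std::optional<int> FlatSize(const std::vector<int64_t>& dims) {
  int64_t product = 1;
  for (int64_t d : dims) {
    if (d < 0 || d > std::numeric_limits<int>::max()) return std::nullopt;
    // Guard the multiplication so the product itself cannot overflow.
    if (d != 0 && product > std::numeric_limits<int>::max() / d)
      return std::nullopt;
    product *= d;
  }
  return static_cast<int>(product);
}

int main() {
  assert(FlatSize({1, 2, 3}) == 6);                         // product of dims
  assert(!FlatSize({1, -2, 3}));                            // negative dim
  assert(!FlatSize({3000000000}));                          // element overflow
  assert(!FlatSize({1000000, 2000000, 2000000, 2000000}));  // shape too large
  return 0;
}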
#include "tensorflow/cc/training/coordinator.h" namespace tensorflow { Coordinator::Coordinator() : Coordinator(std::vector<error::Code>()) {} Coordinator::Coordinator(const std::vector<error::Code>& clean_stop_errors) : should_stop_(false) { if (clean_stop_errors.empty()) { clean_stop_errors_.insert(error::OUT_OF_RANGE); } else { for (const auto& code : clean_stop_errors) { clean_stop_errors_.insert(static_cast<int>(code)); } } } Coordinator::~Coordinator() { RequestStop().IgnoreError(); Join().IgnoreError(); } Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) { { mutex_lock l(mu_); if (should_stop_) { return Status(absl::StatusCode::kFailedPrecondition, "The coordinator has been stopped."); } } mutex_lock l(runners_lock_); runners_.push_back(std::move(runner)); return absl::OkStatus(); } bool Coordinator::AllRunnersStopped() { mutex_lock l(runners_lock_); for (const auto& runner : runners_) { if (runner->IsRunning()) { return false; } } return true; } Status Coordinator::RequestStop() { mutex_lock l(mu_); if (should_stop_) { return Status(absl::StatusCode::kFailedPrecondition, "The Coordinator is not running."); } should_stop_ = true; wait_for_stop_.notify_all(); return absl::OkStatus(); } bool Coordinator::ShouldStop() { mutex_lock l(mu_); return should_stop_; } Status Coordinator::Join() { { mutex_lock l(mu_); if (!should_stop_) { return Status(absl::StatusCode::kFailedPrecondition, "Joining coordinator without requesting to stop."); } } { mutex_lock l(runners_lock_); for (const auto& t : runners_) { ReportStatus(t->Join()); } runners_.clear(); } return GetStatus(); } void Coordinator::ReportStatus(const Status& status) { mutex_lock l(status_lock_); if (status.ok() || !status_.ok() || clean_stop_errors_.count(static_cast<int>(status.code())) > 0) { return; } status_ = status; } Status Coordinator::GetStatus() { mutex_lock l(status_lock_); return status_; } void Coordinator::WaitForStop() { mutex_lock l(mu_); while (!should_stop_) { wait_for_stop_.wait(l); } } Status Coordinator::ExportCostGraph(CostGraphDef* cost_graph) const { mutex_lock l(runners_lock_); for (auto& t : runners_) { Status s = t->ExportCostGraph(cost_graph); if (!s.ok()) { return s; } } return absl::OkStatus(); } }
#include "tensorflow/cc/training/coordinator.h" #include "tensorflow/cc/training/queue_runner.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { using error::Code; void WaitForStopThread(Coordinator* coord, Notification* about_to_wait, Notification* done) { about_to_wait->Notify(); coord->WaitForStop(); done->Notify(); } TEST(CoordinatorTest, TestStopAndWaitOnStop) { Coordinator coord; EXPECT_EQ(coord.ShouldStop(), false); Notification about_to_wait; Notification done; Env::Default()->SchedClosure( std::bind(&WaitForStopThread, &coord, &about_to_wait, &done)); about_to_wait.WaitForNotification(); Env::Default()->SleepForMicroseconds(1000 * 1000); EXPECT_FALSE(done.HasBeenNotified()); TF_EXPECT_OK(coord.RequestStop()); done.WaitForNotification(); EXPECT_TRUE(coord.ShouldStop()); } class MockQueueRunner : public RunnerInterface { public: explicit MockQueueRunner(Coordinator* coord) { coord_ = coord; join_counter_ = nullptr; thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10)); stopped_ = false; } MockQueueRunner(Coordinator* coord, int* join_counter) : MockQueueRunner(coord) { join_counter_ = join_counter; } void StartCounting(std::atomic<int>* counter, int until, Notification* start = nullptr) { thread_pool_->Schedule( std::bind(&MockQueueRunner::CountThread, this, counter, until, start)); } void StartSettingStatus(const Status& status, BlockingCounter* counter, Notification* start) { thread_pool_->Schedule(std::bind(&MockQueueRunner::SetStatusThread, this, status, counter, start)); } Status Join() override { if (join_counter_ != nullptr) { (*join_counter_)++; } thread_pool_.reset(); return status_; } Status GetStatus() { return status_; } void SetStatus(const Status& status) { status_ = status; } bool IsRunning() const override { return !stopped_; }; void Stop() { stopped_ = true; } private: void CountThread(std::atomic<int>* counter, int until, Notification* start) { if (start != nullptr) start->WaitForNotification(); while (!coord_->ShouldStop() && counter->load() < until) { (*counter)++; Env::Default()->SleepForMicroseconds(10 * 1000); } coord_->RequestStop().IgnoreError(); } void SetStatusThread(const Status& status, BlockingCounter* counter, Notification* start) { start->WaitForNotification(); SetStatus(status); counter->DecrementCount(); } std::unique_ptr<thread::ThreadPool> thread_pool_; Status status_; Coordinator* coord_; int* join_counter_; bool stopped_; }; TEST(CoordinatorTest, TestRealStop) { std::atomic<int> counter(0); Coordinator coord; std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord)); qr1->StartCounting(&counter, 100); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1))); std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord)); qr2->StartCounting(&counter, 100); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2))); while (counter.load() == 0) ; TF_EXPECT_OK(coord.RequestStop()); int temp_counter = counter.load(); Env::Default()->SleepForMicroseconds(1000 * 1000); EXPECT_EQ(temp_counter, counter.load()); TF_EXPECT_OK(coord.Join()); } TEST(CoordinatorTest, TestRequestStop) { Coordinator coord; std::atomic<int> counter(0); Notification start; std::unique_ptr<MockQueueRunner> qr; for (int i = 0; i < 10; i++) 
{ qr.reset(new MockQueueRunner(&coord)); qr->StartCounting(&counter, 10, &start); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr))); } start.Notify(); coord.WaitForStop(); EXPECT_EQ(coord.ShouldStop(), true); EXPECT_EQ(counter.load(), 10); TF_EXPECT_OK(coord.Join()); } TEST(CoordinatorTest, TestJoin) { Coordinator coord; int join_counter = 0; std::unique_ptr<MockQueueRunner> qr1( new MockQueueRunner(&coord, &join_counter)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1))); std::unique_ptr<MockQueueRunner> qr2( new MockQueueRunner(&coord, &join_counter)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2))); TF_EXPECT_OK(coord.RequestStop()); TF_EXPECT_OK(coord.Join()); EXPECT_EQ(join_counter, 2); } TEST(CoordinatorTest, StatusReporting) { Coordinator coord({Code::CANCELLED, Code::OUT_OF_RANGE}); Notification start; BlockingCounter counter(3); std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord)); qr1->StartSettingStatus(Status(absl::StatusCode::kCancelled, ""), &counter, &start); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1))); std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord)); qr2->StartSettingStatus(Status(absl::StatusCode::kInvalidArgument, ""), &counter, &start); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2))); std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord)); qr3->StartSettingStatus(Status(absl::StatusCode::kOutOfRange, ""), &counter, &start); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr3))); start.Notify(); counter.Wait(); TF_EXPECT_OK(coord.RequestStop()); EXPECT_EQ(coord.Join().code(), absl::StatusCode::kInvalidArgument); } TEST(CoordinatorTest, JoinWithoutStop) { Coordinator coord; std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr))); EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION); } TEST(CoordinatorTest, AllRunnersStopped) { Coordinator coord; MockQueueRunner* qr = new MockQueueRunner(&coord); TF_ASSERT_OK(coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr))); EXPECT_FALSE(coord.AllRunnersStopped()); qr->Stop(); EXPECT_TRUE(coord.AllRunnersStopped()); } } }
bool Coordinator::ShouldStop() {
  mutex_lock l(mu_);
  return should_stop_;
}
TEST(CoordinatorTest, TestStopAndWaitOnStop) {
  Coordinator coord;
  EXPECT_EQ(coord.ShouldStop(), false);
  Notification about_to_wait;
  Notification done;
  Env::Default()->SchedClosure(
      std::bind(&WaitForStopThread, &coord, &about_to_wait, &done));
  about_to_wait.WaitForNotification();
  Env::Default()->SleepForMicroseconds(1000 * 1000);
  EXPECT_FALSE(done.HasBeenNotified());
  TF_EXPECT_OK(coord.RequestStop());
  done.WaitForNotification();
  EXPECT_TRUE(coord.ShouldStop());
}

TEST(CoordinatorTest, TestRealStop) {
  std::atomic<int> counter(0);
  Coordinator coord;
  std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
  qr1->StartCounting(&counter, 100);
  TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
  std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
  qr2->StartCounting(&counter, 100);
  TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
  while (counter.load() == 0)
    ;
  TF_EXPECT_OK(coord.RequestStop());
  int temp_counter = counter.load();
  Env::Default()->SleepForMicroseconds(1000 * 1000);
  EXPECT_EQ(temp_counter, counter.load());
  TF_EXPECT_OK(coord.Join());
}
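Putting the Coordinator API together, the intended lifecycle is register, stop, join: runners are registered while the coordinator is live, RequestStop flips should_stop_ and wakes WaitForStop callers, and Join collects each runner's final status, keeping only the first non-clean error. A minimal usage sketch with a hypothetical DummyRunner that overrides only the two pure-virtual RunnerInterface methods the MockQueueRunner above also overrides:

#include <memory>

#include "tensorflow/cc/training/coordinator.h"

namespace tensorflow {

// Hypothetical runner that is already stopped and joins cleanly.
class DummyRunner : public RunnerInterface {
 public:
  Status Join() override { return absl::OkStatus(); }
  bool IsRunning() const override { return false; }
};

void TypicalLifecycle() {
  Coordinator coord;
  coord.RegisterRunner(std::make_unique<DummyRunner>()).IgnoreError();
  coord.RequestStop().IgnoreError();  // wakes any WaitForStop() callers
  Status s = coord.Join();  // joins runners, returns first non-clean error
  (void)s;
}

}  // namespace tensorflow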
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h" #include <cstdint> #include <cstring> #include <iterator> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/TypeSwitch.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/Support/LLVM.h" #include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h" #include "tensorflow/core/tfrt/mlrt/bytecode/executable.h" namespace mlrt { namespace { bool CanBeInlined(mlir::Attribute attr, absl::string_view data) { return mlir::isa<mlir::IntegerAttr, mlir::FloatAttr, mlir::FlatSymbolRefAttr>( attr) && data.size() <= sizeof(uint32_t); } template <typename T> std::string EncodeIntegerOrFloat(T attr) { std::string data(sizeof(attr), '\0'); std::memcpy(data.data(), &attr, sizeof(attr)); return data; } template <typename T> std::optional<std::string> EncodeListOfInteger(mlir::ArrayAttr array) { bc::Buffer buffer; bc::Allocator allocator(&buffer); auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size()); mlir::Type type; for (int i = 0; i < array.size(); ++i) { if (auto integer_attr = mlir::dyn_cast<mlir::IntegerAttr>(array[i])) { if (type && integer_attr.getType() != type) return std::nullopt; type = integer_attr.getType(); llvm::APInt value = integer_attr.getValue(); if (value.getBitWidth() != sizeof(T) * 8) return std::nullopt; ctor.ConstructAt(i, value.getZExtValue()); } else { return std::nullopt; } } return std::string(buffer.data(), buffer.size()); } std::optional<std::string> EncodeListOfSymbolRef( const ModuleEmitterContext& module_context, mlir::ArrayAttr array) { bc::Buffer buffer; bc::Allocator allocator(&buffer); auto ctor = bc::New<bc::Vector<uint32_t>>(&allocator, array.size()); for (int i = 0; i < array.size(); ++i) { if (auto symbol_ref = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(array[i])) { ctor.ConstructAt(i, module_context.GetFunctionId(symbol_ref.getValue())); } else { return std::nullopt; } } return std::string(buffer.data(), buffer.size()); } template <typename T> std::optional<std::string> EncodeDenseArray(llvm::ArrayRef<T> array) { bc::Buffer buffer; bc::Allocator allocator(&buffer); auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size()); if (!array.empty()) { ctor.Place(reinterpret_cast<const char*>(array.data()), array.size() * sizeof(T)); } return std::string(buffer.data(), buffer.size()); } std::optional<std::string> EncodeDenseBoolArray(llvm::ArrayRef<bool> array) { bc::Buffer buffer; bc::Allocator allocator(&buffer); auto ctor = bc::New<bc::Vector<uint8_t>>(&allocator, array.size()); if (!array.empty()) { std::vector<uint8_t> data(array.size()); int i = 0; for (auto v : array) { data[i++] = static_cast<uint8_t>(v); } ctor.Place(reinterpret_cast<const char*>(data.data()), data.size()); } return std::string(buffer.data(), buffer.size()); } std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) { bc::Buffer buffer; bc::Allocator allocator(&buffer); auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size()); for (int i = 0; i < array.size(); ++i) { if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) { ctor.ConstructAt(i, string_attr.getValue().str()); } else { return std::nullopt; } } return std::string(buffer.data(), buffer.size()); } struct FunctionEmitterContext { explicit FunctionEmitterContext(const ModuleEmitterContext* 
module_context) : module_context(*module_context) {} const ModuleEmitterContext& module_context; struct RegInfo { int num_uses = 0; int id = -1; }; int next_reg_id = 0; llvm::DenseMap<mlir::Value, RegInfo> register_table; std::vector<int> free_regs; int AssignRegId() { if (free_regs.empty()) { return next_reg_id++; } int id = free_regs.back(); free_regs.pop_back(); return id; } void FreeRegId(int id) { free_regs.push_back(id); } }; void EmitKernel(FunctionEmitterContext& function_context, bc::Kernel::Constructor& constructor, mlir::Operation& op, std::vector<uint32_t>& function_output_regs, std::vector<uint8_t>& function_output_last_uses) { std::vector<uint32_t> results; results.reserve(op.getNumResults()); for (auto result : op.getResults()) { auto iter = function_context.register_table.find(result); CHECK(iter != function_context.register_table.end()); CHECK_EQ(iter->second.id, -1); iter->second.id = function_context.AssignRegId(); results.push_back(iter->second.id); } constructor.construct_results(results.size()) .Assign(results.begin(), results.end()); std::vector<uint32_t> arguments; std::vector<uint8_t> last_uses; arguments.reserve(op.getNumOperands()); last_uses.reserve(op.getNumOperands()); for (auto operand : op.getOperands()) { auto iter = function_context.register_table.find(operand); CHECK(iter != function_context.register_table.end()); int id = iter->second.id; CHECK_NE(id, -1); last_uses.push_back(0); if (--iter->second.num_uses == 0) { function_context.FreeRegId(id); last_uses.back() = 1; } arguments.push_back(id); } constructor.construct_arguments(arguments.size()) .Assign(arguments.begin(), arguments.end()); constructor.construct_last_uses(last_uses.size()) .Assign(last_uses.begin(), last_uses.end()); std::vector<uint32_t> attributes; attributes.reserve(op.getAttrs().size()); for (auto attr : op.getAttrs()) { int attr_id = function_context.module_context.GetAttributeId(attr.getValue()); absl::string_view attr_data = function_context.module_context.attributes().at(attr_id); if (CanBeInlined(attr.getValue(), attr_data)) { uint32_t data = 0; std::memcpy(&data, attr_data.data(), attr_data.size()); attributes.push_back(data); } else { attributes.push_back(attr_id); } } constructor.construct_attributes(attributes.size()) .Assign(attributes.begin(), attributes.end()); if (llvm::isa<mlir::func::ReturnOp>(&op)) { constructor.set_code(function_context.module_context.GetKernelId("return")); function_output_regs = std::move(arguments); function_output_last_uses = std::move(last_uses); } else if (llvm::isa<mlir::func::CallOp>(&op)) { constructor.set_code(function_context.module_context.GetKernelId("call")); } else { llvm::StringRef op_name = op.getName().getStringRef(); constructor.set_code(function_context.module_context.GetKernelId(op_name)); } } void EmitFunction(const ModuleEmitterContext& module_context, bc::Function::Constructor& constructor, llvm::StringRef name, mlir::Region& region) { FunctionEmitterContext function_context(&module_context); constructor.construct_name(name.str()); DCHECK(llvm::hasSingleElement(region)) << "should have a single block"; auto& block = region.front(); auto& register_table = function_context.register_table; std::vector<uint32_t> input_regs; input_regs.reserve(block.getNumArguments()); for (auto arg : block.getArguments()) { int id = function_context.AssignRegId(); input_regs.push_back(id); register_table[arg] = {static_cast<int>(std::distance(arg.getUses().begin(), arg.getUses().end())), id}; } constructor.construct_input_regs(input_regs); for 
(auto& op : block) { for (auto result : op.getResults()) { register_table[result] = {static_cast<int>( std::distance(result.getUses().begin(), result.getUses().end()))}; } } auto kernels_constructor = constructor.construct_kernels(block.getOperations().size()); std::vector<uint32_t> output_regs; std::vector<uint8_t> output_last_uses; for (const auto& iter : llvm::enumerate(block.getOperations())) { int i = iter.index(); mlir::Operation& op = iter.value(); auto kernel_ctor = kernels_constructor.ConstructAt(i); EmitKernel(function_context, kernel_ctor, op, output_regs, output_last_uses); } constructor.set_num_regs(function_context.next_reg_id); constructor.construct_output_regs(output_regs); constructor.construct_output_last_uses(output_last_uses); } absl::Status EmitExecutable(ModuleEmitterContext& module_context, bc::Executable::Constructor& constructor, mlir::ModuleOp module) { module.walk( [&](mlir::func::FuncOp func) { module_context.AddFunction(func); }); auto functions = module_context.functions(); for (auto func : functions) { if (!llvm::hasSingleElement(func.getRegion())) { return absl::InvalidArgumentError("function should have a single block."); } auto& block = func.getRegion().front(); for (auto& op : block) { if (llvm::isa<mlir::func::CallOp>(&op)) { module_context.AddKernelName("call"); } else if (llvm::isa<mlir::func::ReturnOp>(&op)) { if (op.getNumResults() != 0) { return absl::InvalidArgumentError( "Block terminator must be a return op."); } module_context.AddKernelName("return"); } else { module_context.AddKernelName(op.getName().getStringRef().str()); } for (auto attr : op.getAttrs()) { if (auto status = module_context.AddAttribute(&op, attr.getValue()); !status.ok()) { return status; } } } } constructor.construct_kernel_names(module_context.kernels().size()) .Assign(module_context.kernels().begin(), module_context.kernels().end()); auto functions_constructor = constructor.construct_functions(functions.size()); for (int i = 0; i < functions.size(); ++i) { auto func = functions[i]; auto function_ctor = functions_constructor.ConstructAt(i); EmitFunction(module_context, function_ctor, func.getSymName(), func.getRegion()); } constructor.construct_attributes(module_context.attributes().size()) .Assign(module_context.attributes().begin(), module_context.attributes().end()); return absl::OkStatus(); } } absl::Status ModuleEmitterContext::AddAttribute(mlir::Operation* op, mlir::Attribute attr) { absl::StatusOr<std::string> attr_data; if (auto* encoder = attribute_encoder_registry_.Get( op->getName().getDialectNamespace())) { attr_data = (*encoder)(*this, attr); } else { attr_data = DefaultEncodeAttribute(attr); } if (!attr_data.ok()) return std::move(attr_data).status(); int id = AddData(std::move(*attr_data), attributes_, attribute_data_id_map_); attribute_id_map_[attr] = id; return absl::OkStatus(); } int ModuleEmitterContext::AddFunction(mlir::func::FuncOp func) { int id = functions_.size(); functions_.push_back(func); DCHECK(!function_name_id_map_.contains(func.getSymName())); function_name_id_map_[func.getSymName()] = id; return id; } std::optional<std::string> EncodeSimpleAttribute( const ModuleEmitterContext& module_context, mlir::Attribute attr) { return llvm::TypeSwitch<mlir::Attribute, std::optional<std::string>>(attr) .Case<mlir::StringAttr>( [](const auto& str_attr) { return str_attr.str(); }) .Case<mlir::IntegerAttr>( [](const auto& integer_attr) -> std::optional<std::string> { switch (llvm::APInt value = integer_attr.getValue(); value.getBitWidth()) { case 1: return 
EncodeIntegerOrFloat<uint8_t>(value.getZExtValue()); case 32: return EncodeIntegerOrFloat<uint32_t>(value.getZExtValue()); case 64: return EncodeIntegerOrFloat<uint64_t>(value.getZExtValue()); default: return std::nullopt; } }) .Case<mlir::FloatAttr>( [](const auto& float_attr) -> std::optional<std::string> { llvm::APFloat value = float_attr.getValue(); if (float_attr.getType().isF32()) { return EncodeIntegerOrFloat<float>(value.convertToFloat()); } return std::nullopt; }) .Case<mlir::ArrayAttr>([&](const auto& array_attr) -> std::optional<std::string> { if (auto encoded_list_i32 = EncodeListOfInteger<uint32_t>(array_attr)) { return std::move(*encoded_list_i32); } else if (auto encoded_list_i64 = EncodeListOfInteger<uint64_t>(array_attr)) { return std::move(*encoded_list_i64); } else if (auto encoded_list_string = EncodeListOfString(array_attr)) { return std::move(*encoded_list_string); } else if (auto encoded_list_symbol_ref = EncodeListOfSymbolRef(module_context, array_attr)) { return std::move(*encoded_list_symbol_ref); } else { return std::nullopt; } }) .Case<mlir::DenseI32ArrayAttr>( [](const auto& dense_array_i32) -> std::optional<std::string> { return EncodeDenseArray<int32_t>(dense_array_i32); }) .Case<mlir::DenseI64ArrayAttr>( [](const auto& dense_array_i64) -> std::optional<std::string> { return EncodeDenseArray<int64_t>(dense_array_i64); }) .Case<mlir::DenseBoolArrayAttr>( [](const auto& dense_array_bool) -> std::optional<std::string> { return EncodeDenseBoolArray(dense_array_bool.asArrayRef()); }) .Case<mlir::FlatSymbolRefAttr>([&](const auto& symbol_ref) { return EncodeIntegerOrFloat<uint32_t>( module_context.GetFunctionId(symbol_ref.getValue())); }) .Default([](const auto& attr) { return std::nullopt; }); } absl::StatusOr<std::string> ModuleEmitterContext::DefaultEncodeAttribute( mlir::Attribute attr) { if (auto result = EncodeSimpleAttribute(*this, attr)) { return std::move(*result); } std ::string attr_str; llvm::raw_string_ostream os(attr_str); attr.print(os); return absl::InvalidArgumentError( absl::StrCat("Try to encode unsupported attribute: ", attr_str)); } absl::StatusOr<bc::Buffer> EmitExecutable( const AttributeEncoderRegistry& attribute_encoder_registry, mlir::ModuleOp module) { bc::Buffer buffer; bc::Allocator allocator(&buffer); ModuleEmitterContext module_context(&attribute_encoder_registry); auto executable_ctor = bc::New<bc::Executable>(&allocator); if (auto status = EmitExecutable(module_context, executable_ctor, module); !status.ok()) { return status; } buffer.shrink_to_fit(); return buffer; } }
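One detail of the emitter worth calling out: each kernel stores one 32-bit word per attribute, and CanBeInlined decides whether that word holds the attribute's encoded bytes directly (integer, float, and flat-symbol-ref kinds whose encoding fits in sizeof(uint32_t)) or an index into the module's attribute table. A standalone sketch of just that packing decision (InlineOrIndex is an illustrative name, not the real API):

#include <cstdint>
#include <cstring>
#include <string>

// Illustrative version of the inline-vs-index choice in EmitKernel: small
// encodings of inlinable kinds are copied into the 32-bit slot byte-for-byte,
// anything else stores an index into the attribute table instead.
uint32_t InlineOrIndex(bool is_inlinable_kind, const std::string& encoded,
                       uint32_t attr_id) {
  if (is_inlinable_kind && encoded.size() <= sizeof(uint32_t)) {
    uint32_t word = 0;
    std::memcpy(&word, encoded.data(), encoded.size());  // raw byte copy
    return word;  // the slot carries the value itself
  }
  return attr_id;  // the slot carries an index into attributes()
}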
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h" #include <cstring> #include <string> #include <vector> #include <gmock/gmock.h> #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Parser/Parser.h" #include "mlir/Support/LLVM.h" #include "tensorflow/core/tfrt/mlrt/bytecode/executable.h" #include "tensorflow/core/tfrt/mlrt/interpreter/attribute_span.h" #include "tsl/platform/resource_loader.h" #include "tsl/platform/status_matchers.h" namespace mlrt { namespace { using ::testing::ElementsAreArray; using ::testing::FloatEq; using ::testing::IsEmpty; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; TEST(MlirToByteCodeTest, Basic) { constexpr char kBasicMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context); AttributeEncoderRegistry attribute_encoder_registry; bc::Buffer buffer = EmitExecutable(attribute_encoder_registry, mlir_module.get()).value(); bc::Executable executable(buffer.data()); auto kernel_names = executable.kernel_names(); EXPECT_THAT(kernel_names, ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32", "call", "return"})); auto functions = executable.functions(); ASSERT_GE(functions.size(), 1); auto function = functions[0]; EXPECT_EQ(function.name().str(), "add_i32_10"); EXPECT_EQ(function.num_regs(), 5); EXPECT_THAT(function.input_regs(), ElementsAreArray({0})); EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2})); EXPECT_THAT(function.output_last_uses(), ElementsAreArray({true, false, true})); auto kernels = function.kernels(); ASSERT_EQ(kernels.size(), 11); EXPECT_EQ(kernels[0].code(), 0); EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0})); EXPECT_THAT(kernels[0].results(), ElementsAreArray({1})); EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0})); for (int i = 1; i < 9; i++) { EXPECT_EQ(kernels[i].code(), i % 2); EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0})); EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1})); EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0})); } EXPECT_EQ(kernels[9].code(), 2); EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1})); EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true})); EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4})); EXPECT_EQ(kernels[10].code(), 3); EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2})); EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true})); EXPECT_TRUE(kernels[10].results().empty()); } template <typename T> absl::StatusOr<T> DecodeAttribute(absl::string_view data) { if (data.size() < sizeof(T)) return absl::InvalidArgumentError("Invalid data size for attribute."); T value; std::memcpy(&value, data.data(), sizeof(T)); return value; } TEST(MlirToByteCodeTest, BasicAttributes) { constexpr char kBasicAttributesMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/" "basic_attributes.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context); AttributeEncoderRegistry 
attribute_encoder_registry; bc::Buffer buffer = EmitExecutable(attribute_encoder_registry, mlir_module.get()).value(); bc::Executable executable(buffer.data()); auto attributes = executable.attributes(); ASSERT_EQ(attributes.size(), 15); auto attr_iter = attributes.begin(); EXPECT_EQ(*attr_iter, "test string"); ++attr_iter; EXPECT_EQ(*attr_iter, "ts"); ++attr_iter; EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100)); ++attr_iter; EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200)); ++attr_iter; EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0))); ++attr_iter; EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0)); ++attr_iter; bc::Vector<int64_t> list_of_i64((*attr_iter).data()); EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4})); ++attr_iter; bc::Vector<int32_t> list_of_i32((*attr_iter).data()); EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3})); ++attr_iter; bc::Vector<bc::String> list_of_str((*attr_iter).data()); EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"})); ++attr_iter; EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1)); EXPECT_EQ(executable.functions()[1].name().Get(), "callee"); ++attr_iter; bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data()); EXPECT_EQ(executable.functions()[2].name().Get(), "callee0"); EXPECT_EQ(executable.functions()[3].name().Get(), "callee1"); EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3})); ++attr_iter; bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data()); EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2})); ++attr_iter; bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data()); EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2})); ++attr_iter; bc::Vector<int32_t> empty_dense_array((*attr_iter).data()); EXPECT_TRUE(empty_dense_array.empty()); ++attr_iter; bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data()); EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false})); auto kernels = executable.functions()[0].kernels(); ASSERT_EQ(kernels.size(), 16); auto kernel_iter = kernels.begin(); auto attribute_span = [&](auto kernel_iter) { return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes); }; EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "test string"); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts"); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0)); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0), ElementsAreArray({0, 1, 2, 3, 4})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({0, 1, 2, 3})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0), ElementsAreArray({"string 0", "string 1"})); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({2, 3})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({0, 1, 2})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0), ElementsAreArray({0, 1, 2})); ++kernel_iter; 
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), IsEmpty()); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0), ElementsAreArray({true, false})); } TEST(MlirToByteCodeTest, UnsupportedAttributes) { constexpr char kUnsupportedAttributesMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/" "unsupported_attributes.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kUnsupportedAttributesMlir), &mlir_context); AttributeEncoderRegistry attribute_encoder_registry; EXPECT_THAT(EmitExecutable(attribute_encoder_registry, mlir_module.get()), StatusIs(absl::StatusCode::kInvalidArgument, "Try to encode unsupported attribute: unit")); } class CustomDense { public: struct StorageType { using Self = StorageType; DEFINE_BYTECODE_FIELD(bc::Vector<int64_t>, shape); DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, data); }; class Constructor { public: Constructor(bc::Allocator* allocator, bc::BcAddr_t address) : allocator_(allocator), address_(address) {} template <typename... Args> auto construct_shape(Args&&... args) { return StorageType::construct_shape(allocator_, address_, std::forward<Args>(args)...); } template <typename... Args> auto construct_data(Args&&... args) { return StorageType::construct_data(allocator_, address_, std::forward<Args>(args)...); } bc::BcAddr_t address() const { return address_; } private: bc::Allocator* allocator_; bc::BcAddr_t address_; }; using NonTrivialConstructorType = Constructor; explicit CustomDense(const char* p) : p_(p) {} bc::Vector<int64_t> shape() const { return StorageType::read_shape(p_); } bc::Vector<uint32_t> data() const { return StorageType::read_data(p_); } private: const char* p_ = nullptr; }; absl::StatusOr<std::string> EncodeCustomDense(const ModuleEmitterContext&, mlir::Attribute attr) { auto dense_int_attr = mlir::dyn_cast<mlir::DenseIntElementsAttr>(attr); if (!dense_int_attr) return absl::InvalidArgumentError( "The element of the custom dense attribute must be an integer."); if (mlir::cast<mlir::IntegerType>(dense_int_attr.getElementType()) .getWidth() != 32) { return absl::InvalidArgumentError( "The element of the custom dense attribute must be an i32 integer."); } bc::Buffer buffer; bc::Allocator allocator(&buffer); auto custom_dense_ctor = bc::New<CustomDense>(&allocator); auto shaped_type = dense_int_attr.getType(); std::vector<int64_t> shape(shaped_type.getShape().begin(), shaped_type.getShape().end()); custom_dense_ctor.construct_shape(shape); custom_dense_ctor.construct_data(shaped_type.getNumElements()) .Place(dense_int_attr.getRawData().data(), dense_int_attr.getRawData().size()); return std::string(buffer.data(), buffer.size()); } TEST(MlirToByteCodeTest, CustomDense) { constexpr char kCustomAttributesMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/" "custom_attributes.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kCustomAttributesMlir), &mlir_context); AttributeEncoderRegistry attribute_encoder_registry; attribute_encoder_registry.Register("test_custom", &EncodeCustomDense); bc::Buffer buffer = EmitExecutable(attribute_encoder_registry, 
mlir_module.get()).value(); bc::Executable executable(buffer.data()); auto attributes = executable.attributes(); ASSERT_EQ(attributes.size(), 10); for (int i = 0; i < 10; ++i) { bc::String attr_data = attributes[i]; CustomDense custom_dense(attr_data.data()); EXPECT_THAT(custom_dense.shape(), ElementsAreArray({1})); EXPECT_THAT(custom_dense.data(), ElementsAreArray({i})); } } } }
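The custom encoder above flattens a dense attribute into one contiguous buffer that CustomDense later reads in place. As a rough illustration of the idea only — the byte layout below is hypothetical, not the offset-based format that DEFINE_BYTECODE_FIELD actually produces — here is a standalone sketch of packing a shape vector and raw i32 data into a single string and reading it back from a raw pointer:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Pack shape and data back-to-back: [num_dims][dims...][num_elems][elems...].
std::string EncodeDense(const std::vector<int64_t>& shape,
                        const std::vector<int32_t>& data) {
  std::string buf;
  auto append = [&buf](const void* p, size_t n) {
    buf.append(reinterpret_cast<const char*>(p), n);
  };
  uint64_t num_dims = shape.size(), num_elems = data.size();
  append(&num_dims, sizeof(num_dims));
  append(shape.data(), shape.size() * sizeof(int64_t));
  append(&num_elems, sizeof(num_elems));
  append(data.data(), data.size() * sizeof(int32_t));
  return buf;
}

int main() {
  std::string buf = EncodeDense({1}, {42});
  // Decode in place, mirroring CustomDense's read-from-pointer style.
  const char* p = buf.data();
  uint64_t num_dims;
  std::memcpy(&num_dims, p, sizeof(num_dims));
  p += sizeof(num_dims) + num_dims * sizeof(int64_t);
  uint64_t num_elems;
  std::memcpy(&num_elems, p, sizeof(num_elems));
  p += sizeof(num_elems);
  int32_t v;
  std::memcpy(&v, p, sizeof(v));
  std::cout << "dims=" << num_dims << " elems=" << num_elems
            << " first=" << v << "\n";  // dims=1 elems=1 first=42
  return 0;
}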
void EmitKernel(FunctionEmitterContext& function_context, bc::Kernel::Constructor& constructor, mlir::Operation& op, std::vector<uint32_t>& function_output_regs, std::vector<uint8_t>& function_output_last_uses) { std::vector<uint32_t> results; results.reserve(op.getNumResults()); for (auto result : op.getResults()) { auto iter = function_context.register_table.find(result); CHECK(iter != function_context.register_table.end()); CHECK_EQ(iter->second.id, -1); iter->second.id = function_context.AssignRegId(); results.push_back(iter->second.id); } constructor.construct_results(results.size()) .Assign(results.begin(), results.end()); std::vector<uint32_t> arguments; std::vector<uint8_t> last_uses; arguments.reserve(op.getNumOperands()); last_uses.reserve(op.getNumOperands()); for (auto operand : op.getOperands()) { auto iter = function_context.register_table.find(operand); CHECK(iter != function_context.register_table.end()); int id = iter->second.id; CHECK_NE(id, -1); last_uses.push_back(0); if (--iter->second.num_uses == 0) { function_context.FreeRegId(id); last_uses.back() = 1; } arguments.push_back(id); } constructor.construct_arguments(arguments.size()) .Assign(arguments.begin(), arguments.end()); constructor.construct_last_uses(last_uses.size()) .Assign(last_uses.begin(), last_uses.end()); std::vector<uint32_t> attributes; attributes.reserve(op.getAttrs().size()); for (auto attr : op.getAttrs()) { int attr_id = function_context.module_context.GetAttributeId(attr.getValue()); absl::string_view attr_data = function_context.module_context.attributes().at(attr_id); if (CanBeInlined(attr.getValue(), attr_data)) { uint32_t data = 0; std::memcpy(&data, attr_data.data(), attr_data.size()); attributes.push_back(data); } else { attributes.push_back(attr_id); } } constructor.construct_attributes(attributes.size()) .Assign(attributes.begin(), attributes.end()); if (llvm::isa<mlir::func::ReturnOp>(&op)) { constructor.set_code(function_context.module_context.GetKernelId("return")); function_output_regs = std::move(arguments); function_output_last_uses = std::move(last_uses); } else if (llvm::isa<mlir::func::CallOp>(&op)) { constructor.set_code(function_context.module_context.GetKernelId("call")); } else { llvm::StringRef op_name = op.getName().getStringRef(); constructor.set_code(function_context.module_context.GetKernelId(op_name)); } }
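EmitKernel above implements a simple linear-scan register scheme: each SSA result takes a fresh id from FunctionEmitterContext, and an operand's last use returns its id to a free list so later results can reuse it. A minimal standalone sketch of that id-reuse discipline (the RegAllocator class here is invented for illustration and is not part of MLRT):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Free-list register allocator mirroring FunctionEmitterContext's
// AssignRegId/FreeRegId behavior: freed ids are reused before new ones.
class RegAllocator {
 public:
  uint32_t Assign() {
    if (!free_.empty()) {
      uint32_t id = free_.back();
      free_.pop_back();
      return id;
    }
    return next_id_++;
  }
  void Free(uint32_t id) { free_.push_back(id); }
  uint32_t NumRegs() const { return next_id_; }

 private:
  uint32_t next_id_ = 0;
  std::vector<uint32_t> free_;
};

int main() {
  RegAllocator regs;
  uint32_t a = regs.Assign();  // %a -> reg 0
  uint32_t b = regs.Assign();  // %b -> reg 1
  regs.Free(a);                // last use of %a: reg 0 returns to the pool
  uint32_t c = regs.Assign();  // %c reuses reg 0 instead of taking reg 2
  assert(c == a);
  (void)b;
  std::cout << "total registers used: " << regs.NumRegs() << "\n";  // 2
  return 0;
}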
TEST(MlirToByteCodeTest, Basic) { constexpr char kBasicMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context); AttributeEncoderRegistry attribute_encoder_registry; bc::Buffer buffer = EmitExecutable(attribute_encoder_registry, mlir_module.get()).value(); bc::Executable executable(buffer.data()); auto kernel_names = executable.kernel_names(); EXPECT_THAT(kernel_names, ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32", "call", "return"})); auto functions = executable.functions(); ASSERT_GE(functions.size(), 1); auto function = functions[0]; EXPECT_EQ(function.name().str(), "add_i32_10"); EXPECT_EQ(function.num_regs(), 5); EXPECT_THAT(function.input_regs(), ElementsAreArray({0})); EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2})); EXPECT_THAT(function.output_last_uses(), ElementsAreArray({true, false, true})); auto kernels = function.kernels(); ASSERT_EQ(kernels.size(), 11); EXPECT_EQ(kernels[0].code(), 0); EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0})); EXPECT_THAT(kernels[0].results(), ElementsAreArray({1})); EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0})); for (int i = 1; i < 9; i++) { EXPECT_EQ(kernels[i].code(), i % 2); EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0})); EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1})); EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0})); } EXPECT_EQ(kernels[9].code(), 2); EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1})); EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true})); EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4})); EXPECT_EQ(kernels[10].code(), 3); EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2})); EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true})); EXPECT_TRUE(kernels[10].results().empty()); } TEST(MlirToByteCodeTest, BasicAttributes) { constexpr char kBasicAttributesMlir[] = "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/" "basic_attributes.mlir"; mlir::DialectRegistry registry; registry.insert<mlir::func::FuncDialect>(); mlir::MLIRContext mlir_context(registry); mlir_context.allowUnregisteredDialects(); auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>( tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context); AttributeEncoderRegistry attribute_encoder_registry; bc::Buffer buffer = EmitExecutable(attribute_encoder_registry, mlir_module.get()).value(); bc::Executable executable(buffer.data()); auto attributes = executable.attributes(); ASSERT_EQ(attributes.size(), 15); auto attr_iter = attributes.begin(); EXPECT_EQ(*attr_iter, "test string"); ++attr_iter; EXPECT_EQ(*attr_iter, "ts"); ++attr_iter; EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100)); ++attr_iter; EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200)); ++attr_iter; EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0))); ++attr_iter; EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0)); ++attr_iter; bc::Vector<int64_t> list_of_i64((*attr_iter).data()); EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4})); ++attr_iter; bc::Vector<int32_t> list_of_i32((*attr_iter).data()); EXPECT_THAT(list_of_i32, 
ElementsAreArray({0, 1, 2, 3})); ++attr_iter; bc::Vector<bc::String> list_of_str((*attr_iter).data()); EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"})); ++attr_iter; EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1)); EXPECT_EQ(executable.functions()[1].name().Get(), "callee"); ++attr_iter; bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data()); EXPECT_EQ(executable.functions()[2].name().Get(), "callee0"); EXPECT_EQ(executable.functions()[3].name().Get(), "callee1"); EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3})); ++attr_iter; bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data()); EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2})); ++attr_iter; bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data()); EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2})); ++attr_iter; bc::Vector<int32_t> empty_dense_array((*attr_iter).data()); EXPECT_TRUE(empty_dense_array.empty()); ++attr_iter; bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data()); EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false})); auto kernels = executable.functions()[0].kernels(); ASSERT_EQ(kernels.size(), 16); auto kernel_iter = kernels.begin(); auto attribute_span = [&](auto kernel_iter) { return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes); }; EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "test string"); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts"); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0)); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0), ElementsAreArray({0, 1, 2, 3, 4})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({0, 1, 2, 3})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0), ElementsAreArray({"string 0", "string 1"})); ++kernel_iter; EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({2, 3})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), ElementsAreArray({0, 1, 2})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0), ElementsAreArray({0, 1, 2})); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0), IsEmpty()); ++kernel_iter; EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0), ElementsAreArray({true, false})); }
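Several of the attribute checks above (e.g. GetAs<int32_t>(0) == 100) read values that EmitKernel inlined directly into a kernel's 32-bit attribute slot via the CanBeInlined path, rather than storing an index into the attribute table. A sketch of that packing, using a hypothetical PackAttribute helper (not an MLRT API):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>

// Pack an attribute's raw bytes into a 32-bit slot when they fit;
// otherwise fall back to storing an index into the attribute table.
uint32_t PackAttribute(const std::string& bytes, uint32_t table_id,
                       bool* inlined) {
  *inlined = bytes.size() <= sizeof(uint32_t);
  if (!*inlined) return table_id;
  uint32_t slot = 0;
  std::memcpy(&slot, bytes.data(), bytes.size());  // same memcpy as EmitKernel
  return slot;
}

int main() {
  int32_t value = 100;
  std::string bytes(reinterpret_cast<const char*>(&value), sizeof(value));
  bool inlined = false;
  uint32_t slot = PackAttribute(bytes, /*table_id=*/7, &inlined);
  int32_t decoded;
  std::memcpy(&decoded, &slot, sizeof(decoded));
  std::cout << (inlined ? "inlined " : "table ") << decoded << "\n";  // inlined 100
  return 0;
}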
#ifndef AROLLA_OPERATORS_CORE_CAST_OPERATOR_H_
#define AROLLA_OPERATORS_CORE_CAST_OPERATOR_H_

#include <algorithm>  // std::max, std::min (used by safe_range)
#include <cstdint>
#include <limits>
#include <tuple>
#include <type_traits>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"

namespace arolla {

template <typename DST>
struct CastOp {
  using run_on_missing = std::true_type;
  using DstTypes =
      meta::type_list<bool, int32_t, int64_t, uint64_t, float, double>;
  using SrcTypes =
      meta::type_list<bool, int32_t, int64_t, uint64_t, float, double>;
  static_assert(meta::contains_v<DstTypes, DST>);

  // Largest SRC value that can be cast to DST without overflow. All three
  // loops only multiply/divide by 2, so every intermediate value is exact.
  template <typename SRC>
  static constexpr SRC max_float_to_int_safe_value() {
    using dst_limits = std::numeric_limits<DST>;
    using src_limits = std::numeric_limits<SRC>;
    static_assert(dst_limits::is_integer);
    static_assert(std::is_floating_point_v<SRC>);
    SRC result = 0;
    int i = 0;
    for (; i < src_limits::digits; ++i) {
      result *= 2;
      result += 1;
    }
    for (; i < dst_limits::digits; ++i) {
      result *= 2;
    }
    for (; i > dst_limits::digits; --i) {
      result /= 2;
    }
    return result;
  }

  // Smallest SRC value that can be cast to DST without underflow.
  template <typename SRC>
  static constexpr SRC min_float_to_int_safe_value() {
    using dst_limits = std::numeric_limits<DST>;
    using src_limits = std::numeric_limits<SRC>;
    static_assert(dst_limits::is_integer);
    static_assert(std::is_floating_point_v<SRC>);
    if constexpr (!dst_limits::is_signed) {
      return 0.0;
    } else {
      SRC result = 1;
      int i = 0;
      for (; i < src_limits::digits; ++i) {
        result *= 2;
      }
      for (; i < dst_limits::digits; ++i) {
        result *= 2;
      }
      for (; i > dst_limits::digits; --i) {
        result += 1;
        result /= 2;
      }
      return -result;
    }
  }

  // Returns an empty tuple if every SRC value casts safely to DST, or the
  // (min, max) pair of the safely castable range otherwise.
  template <typename SRC>
  static constexpr auto safe_range() {
    static_assert(meta::contains_v<SrcTypes, SRC>);
    using dst_limits = std::numeric_limits<DST>;
    using src_limits = std::numeric_limits<SRC>;
    if constexpr (std::is_same_v<SRC, DST>) {
      return std::make_tuple();
    } else if constexpr (std::is_integral_v<DST> && std::is_integral_v<SRC>) {
      constexpr SRC safe_min =
          std::max<int64_t>(dst_limits::min(), src_limits::min());
      constexpr SRC safe_max =
          std::min<uint64_t>(dst_limits::max(), src_limits::max());
      if constexpr (safe_min <= src_limits::min() &&
                    safe_max >= src_limits::max()) {
        return std::make_tuple();
      } else {
        return std::tuple<SRC, SRC>(safe_min, safe_max);
      }
    } else if constexpr (std::is_integral_v<DST> &&
                         std::is_floating_point_v<SRC>) {
      return std::tuple<SRC, SRC>(min_float_to_int_safe_value<SRC>(),
                                  max_float_to_int_safe_value<SRC>());
    } else if constexpr (std::is_floating_point_v<DST> &&
                         std::is_floating_point_v<SRC>) {
      constexpr bool ub_check =
          (src_limits::max() <= dst_limits::max() ||
           static_cast<DST>(src_limits::max()) == dst_limits::max() ||
           static_cast<DST>(src_limits::max()) == dst_limits::infinity());
      static_assert(ub_check);
      return std::make_tuple();
    } else {
      return std::make_tuple();
    }
  }

  template <typename SRC>
  auto operator()(SRC src) const {
    constexpr auto src_range = safe_range<SRC>();
    if constexpr (std::tuple_size_v<decltype(src_range)> == 0) {
      return static_cast<DST>(src);
    } else {
      using ReturnType = absl::StatusOr<DST>;
      const auto& [range_min, range_max] = src_range;
      if (range_min <= src && src <= range_max) {
        return ReturnType(static_cast<DST>(src));
      } else {
        return ReturnType(absl::InvalidArgumentError(absl::StrCat(
            "cannot cast ", ::arolla::Repr(src), " to ",
            std::is_unsigned_v<DST> ? "u" : "", "int", 8 * sizeof(DST))));
      }
    }
  }
};

struct ToBoolOp {
  using run_on_missing = std::true_type;
  template <typename T>
  bool operator()(const T& x) const {
    return x != 0;
  }
};

struct ToOptionalOp {
  using run_on_missing = std::true_type;
  template <typename T>
  OptionalValue<T> operator()(const T& x) const {
    return OptionalValue<T>(x);
  }
};

struct GetOptionalValueOp {
  template <typename T>
  absl::StatusOr<T> operator()(const OptionalValue<T>& x) const {
    if (!x.present) {
      return absl::FailedPreconditionError(
          "core.get_optional_value expects present value, got missing");
    }
    return x.value;
  }
};

}  // namespace arolla

#endif  // AROLLA_OPERATORS_CORE_CAST_OPERATOR_H_
#include "arolla/qexpr/operators/core/cast_operator.h" #include <cmath> #include <cstdint> #include <limits> #include <tuple> #include <type_traits> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "arolla/util/meta.h" #include "arolla/util/testing/status_matchers_backport.h" namespace arolla { namespace { using ::arolla::testing::IsOkAndHolds; using ::arolla::testing::StatusIs; using ::testing::Eq; TEST(CastOperatorTest, CastToInt32UB) { constexpr auto kInt32Min = std::numeric_limits<int32_t>::min(); constexpr auto kInt32Max = std::numeric_limits<int32_t>::max(); constexpr auto kDoubleInt32Min = static_cast<double>(kInt32Min); constexpr auto kDoubleInt32Max = static_cast<double>(kInt32Max); const auto to_int32 = CastOp<int32_t>(); EXPECT_THAT(to_int32(kDoubleInt32Min), IsOkAndHolds(kInt32Min)); EXPECT_THAT(to_int32(kDoubleInt32Max), IsOkAndHolds(kInt32Max)); EXPECT_THAT(to_int32(std::nextafter(kDoubleInt32Min - 1., 0.)), IsOkAndHolds(kInt32Min)); EXPECT_THAT(to_int32(std::nextafter(kDoubleInt32Max + 1., 0.)), IsOkAndHolds(kInt32Max)); EXPECT_THAT(to_int32(kDoubleInt32Min - 1.), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast float64{-2147483649} to int32")); EXPECT_THAT(to_int32(kDoubleInt32Max + 1.), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast float64{2147483648} to int32")); } TEST(CastOperatorTest, CastFromUInt64) { EXPECT_THAT((CastOp<int32_t>()(uint64_t{1})), IsOkAndHolds(int32_t{1})); EXPECT_THAT((CastOp<float>()(uint64_t{1})), Eq(1.0f)); EXPECT_THAT((CastOp<double>()(uint64_t{1})), Eq(1.0)); EXPECT_THAT((CastOp<int64_t>()(uint64_t{1ull << 63})), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast uint64{9223372036854775808} to int64")); } TEST(CastOperatorTest, CastToUInt64) { CastOp<uint64_t> to_uint64; EXPECT_THAT(to_uint64(std::numeric_limits<int64_t>::max()), IsOkAndHolds(uint64_t{std::numeric_limits<int64_t>::max()})); EXPECT_THAT(to_uint64(double{1.0}), IsOkAndHolds(uint64_t{1})); EXPECT_THAT(to_uint64(float{1.0f}), IsOkAndHolds(uint64_t{1})); EXPECT_THAT(to_uint64(uint64_t{1}), Eq(uint64_t{1})); EXPECT_THAT(to_uint64(float{-1.0f}), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast -1. 
to uint64")); EXPECT_THAT(to_uint64(double{-1.0f}), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast float64{-1} to uint64")); EXPECT_THAT( to_uint64(int32_t{-1}), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast -1 to uint64")); EXPECT_THAT(to_uint64(int64_t{-1}), StatusIs(absl::StatusCode::kInvalidArgument, "cannot cast int64{-1} to uint64")); } TEST(CastOperatorTest, CastTo_SafeRange_FloatToInt) { using Srcs = meta::type_list<float, double>; using Dsts = meta::type_list<int32_t, int64_t, uint64_t>; meta::foreach_type<Srcs>([](auto src_type) { using SRC = typename decltype(src_type)::type; meta::foreach_type<Dsts>([](auto dst_type) { using DST = typename decltype(dst_type)::type; using dst_limits = std::numeric_limits<DST>; using src_limits = std::numeric_limits<SRC>; const auto [range_min, range_max] = CastOp<DST>::template safe_range<SRC>(); ASSERT_EQ(static_cast<DST>(range_min), dst_limits::min()); if (!std::is_unsigned_v<DST>) { ASSERT_NE( std::trunc(range_min), std::trunc(std::nextafter(range_min, -src_limits::infinity()))); } ASSERT_LE(static_cast<DST>(range_max), dst_limits::max()); ASSERT_GE(std::nextafter(range_max, src_limits::infinity()), std::exp2(static_cast<SRC>(dst_limits::digits))); }); }); } TEST(CastOperatorTest, CastTo_SafeRange_IntToInt) { ASSERT_EQ(CastOp<int32_t>::safe_range<uint64_t>(), (std::tuple<uint64_t, uint64_t>(0, (1ull << 31) - 1))); ASSERT_EQ(CastOp<int64_t>::safe_range<uint64_t>(), (std::tuple<uint64_t, uint64_t>(0, (1ull << 63) - 1))); ASSERT_EQ(CastOp<uint64_t>::safe_range<int32_t>(), (std::tuple<uint32_t, uint32_t>(0, (1u << 31) - 1))); ASSERT_EQ(CastOp<uint64_t>::safe_range<int64_t>(), (std::tuple<int64_t, int64_t>(0, (1ull << 63) - 1))); } TEST(CastOperatorTest, CastTo_SafeRange_Unneeded) { ASSERT_EQ(CastOp<int64_t>::safe_range<int32_t>(), std::tuple<>()); ASSERT_EQ(CastOp<int32_t>::safe_range<bool>(), std::tuple<>()); ASSERT_EQ(CastOp<float>::safe_range<double>(), std::tuple<>()); ASSERT_EQ(CastOp<double>::safe_range<float>(), std::tuple<>()); } } }
template <typename SRC>
static constexpr SRC max_float_to_int_safe_value() {
  using dst_limits = std::numeric_limits<DST>;
  using src_limits = std::numeric_limits<SRC>;
  static_assert(dst_limits::is_integer);
  static_assert(std::is_floating_point_v<SRC>);
  SRC result = 0;
  int i = 0;
  for (; i < src_limits::digits; ++i) {
    result *= 2;
    result += 1;
  }
  for (; i < dst_limits::digits; ++i) {
    result *= 2;
  }
  for (; i > dst_limits::digits; --i) {
    result /= 2;
  }
  return result;
}
TEST(CastOperatorTest, CastTo_SafeRange_FloatToInt) {
  using Srcs = meta::type_list<float, double>;
  using Dsts = meta::type_list<int32_t, int64_t, uint64_t>;
  meta::foreach_type<Srcs>([](auto src_type) {
    using SRC = typename decltype(src_type)::type;
    meta::foreach_type<Dsts>([](auto dst_type) {
      using DST = typename decltype(dst_type)::type;
      using dst_limits = std::numeric_limits<DST>;
      using src_limits = std::numeric_limits<SRC>;
      const auto [range_min, range_max] =
          CastOp<DST>::template safe_range<SRC>();
      ASSERT_EQ(static_cast<DST>(range_min), dst_limits::min());
      if (!std::is_unsigned_v<DST>) {
        ASSERT_NE(
            std::trunc(range_min),
            std::trunc(std::nextafter(range_min, -src_limits::infinity())));
      }
      ASSERT_LE(static_cast<DST>(range_max), dst_limits::max());
      ASSERT_GE(std::nextafter(range_max, src_limits::infinity()),
                std::exp2(static_cast<SRC>(dst_limits::digits)));
    });
  });
}
#ifndef QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_ #define QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_ #include <memory> #include "quiche/quic/platform/api/quic_export.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/common/quiche_linked_hash_map.h" namespace quic { template <class K, class V, class Hash = std::hash<K>, class Eq = std::equal_to<K>> class QUICHE_EXPORT QuicLRUCache { private: using HashMapType = typename quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq>; public: using iterator = typename HashMapType::iterator; using const_iterator = typename HashMapType::const_iterator; using reverse_iterator = typename HashMapType::reverse_iterator; using const_reverse_iterator = typename HashMapType::const_reverse_iterator; explicit QuicLRUCache(size_t capacity) : capacity_(capacity) {} QuicLRUCache(const QuicLRUCache&) = delete; QuicLRUCache& operator=(const QuicLRUCache&) = delete; iterator begin() { return cache_.begin(); } const_iterator begin() const { return cache_.begin(); } iterator end() { return cache_.end(); } const_iterator end() const { return cache_.end(); } reverse_iterator rbegin() { return cache_.rbegin(); } const_reverse_iterator rbegin() const { return cache_.rbegin(); } reverse_iterator rend() { return cache_.rend(); } const_reverse_iterator rend() const { return cache_.rend(); } void Insert(const K& key, std::unique_ptr<V> value) { auto it = cache_.find(key); if (it != cache_.end()) { cache_.erase(it); } cache_.emplace(key, std::move(value)); if (cache_.size() > capacity_) { cache_.pop_front(); } QUICHE_DCHECK_LE(cache_.size(), capacity_); } iterator Lookup(const K& key) { auto iter = cache_.find(key); if (iter == cache_.end()) { return iter; } std::unique_ptr<V> value = std::move(iter->second); cache_.erase(iter); auto result = cache_.emplace(key, std::move(value)); QUICHE_DCHECK(result.second); return result.first; } iterator Erase(iterator iter) { return cache_.erase(iter); } void Clear() { cache_.clear(); } size_t MaxSize() const { return capacity_; } size_t Size() const { return cache_.size(); } private: quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq> cache_; const size_t capacity_; }; } #endif
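The recency behavior of QuicLRUCache comes entirely from QuicheLinkedHashMap's insertion order: Insert appends at the back, Lookup re-inserts the touched entry at the back, and eviction pops the front. A minimal standalone analogue using std::list plus std::unordered_map — a sketch of the same scheme, not the QUICHE container:

#include <cassert>
#include <iterator>
#include <list>
#include <string>
#include <unordered_map>
#include <utility>

// Least-recently-used entries live at the front of the list; touching a key
// splices it to the back, and inserting past capacity evicts the front.
class LruCache {
 public:
  explicit LruCache(size_t capacity) : capacity_(capacity) {}

  void Insert(int key, std::string value) {
    auto it = index_.find(key);
    if (it != index_.end()) entries_.erase(it->second);
    entries_.emplace_back(key, std::move(value));
    index_[key] = std::prev(entries_.end());
    if (entries_.size() > capacity_) {
      index_.erase(entries_.front().first);
      entries_.pop_front();
    }
  }

  // Returns nullptr on miss; on hit, marks the entry most-recently used.
  const std::string* Lookup(int key) {
    auto it = index_.find(key);
    if (it == index_.end()) return nullptr;
    entries_.splice(entries_.end(), entries_, it->second);  // move to back
    it->second = std::prev(entries_.end());
    return &it->second->second;
  }

 private:
  size_t capacity_;
  std::list<std::pair<int, std::string>> entries_;
  std::unordered_map<int, std::list<std::pair<int, std::string>>::iterator>
      index_;
};

int main() {
  LruCache cache(2);
  cache.Insert(1, "a");
  cache.Insert(2, "b");
  cache.Lookup(1);       // 1 becomes most-recently used
  cache.Insert(3, "c");  // evicts 2, the least-recently used key
  assert(cache.Lookup(2) == nullptr);
  assert(cache.Lookup(1) != nullptr);
  return 0;
}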
#include "quiche/quic/core/quic_lru_cache.h" #include <memory> #include <utility> #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace test { namespace { struct CachedItem { explicit CachedItem(uint32_t new_value) : value(new_value) {} uint32_t value; }; TEST(QuicLRUCacheTest, InsertAndLookup) { QuicLRUCache<int, CachedItem> cache(5); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(0u, cache.Size()); EXPECT_EQ(5u, cache.MaxSize()); std::unique_ptr<CachedItem> item1(new CachedItem(11)); cache.Insert(1, std::move(item1)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(11u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item2(new CachedItem(12)); cache.Insert(1, std::move(item2)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(12u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item3(new CachedItem(13)); cache.Insert(3, std::move(item3)); EXPECT_EQ(2u, cache.Size()); auto iter = cache.Lookup(3); ASSERT_NE(cache.end(), iter); EXPECT_EQ(13u, iter->second->value); cache.Erase(iter); ASSERT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(1u, cache.Size()); cache.Clear(); EXPECT_EQ(0u, cache.Size()); } TEST(QuicLRUCacheTest, Eviction) { QuicLRUCache<int, CachedItem> cache(3); for (size_t i = 1; i <= 4; ++i) { std::unique_ptr<CachedItem> item(new CachedItem(10 + i)); cache.Insert(i, std::move(item)); } EXPECT_EQ(3u, cache.Size()); EXPECT_EQ(3u, cache.MaxSize()); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(14u, cache.Lookup(4)->second->value); EXPECT_EQ(12u, cache.Lookup(2)->second->value); std::unique_ptr<CachedItem> item5(new CachedItem(15)); cache.Insert(5, std::move(item5)); EXPECT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(15u, cache.Lookup(5)->second->value); cache.Clear(); EXPECT_EQ(0u, cache.Size()); } } } }
iterator begin() { return cache_.begin(); } const_iterator begin() const { return cache_.begin(); }
TEST(QuicLRUCacheTest, InsertAndLookup) { QuicLRUCache<int, CachedItem> cache(5); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(0u, cache.Size()); EXPECT_EQ(5u, cache.MaxSize()); std::unique_ptr<CachedItem> item1(new CachedItem(11)); cache.Insert(1, std::move(item1)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(11u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item2(new CachedItem(12)); cache.Insert(1, std::move(item2)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(12u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item3(new CachedItem(13)); cache.Insert(3, std::move(item3)); EXPECT_EQ(2u, cache.Size()); auto iter = cache.Lookup(3); ASSERT_NE(cache.end(), iter); EXPECT_EQ(13u, iter->second->value); cache.Erase(iter); ASSERT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(1u, cache.Size()); cache.Clear(); EXPECT_EQ(0u, cache.Size()); } TEST(QuicLRUCacheTest, Eviction) { QuicLRUCache<int, CachedItem> cache(3); for (size_t i = 1; i <= 4; ++i) { std::unique_ptr<CachedItem> item(new CachedItem(10 + i)); cache.Insert(i, std::move(item)); } EXPECT_EQ(3u, cache.Size()); EXPECT_EQ(3u, cache.MaxSize()); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(14u, cache.Lookup(4)->second->value); EXPECT_EQ(12u, cache.Lookup(2)->second->value); std::unique_ptr<CachedItem> item5(new CachedItem(15)); cache.Insert(5, std::move(item5)); EXPECT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(15u, cache.Lookup(5)->second->value); cache.Clear(); EXPECT_EQ(0u, cache.Size()); }
#include "xla/client/lib/sorting.h" #include <vector> #include "xla/client/lib/comparators.h" #include "xla/client/lib/constants.h" #include "xla/client/lib/loops.h" #include "xla/client/lib/slicing.h" #include "xla/client/xla_builder.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" namespace xla { XlaOp TopK(XlaOp input, int64_t k, PrimitiveType index_type) { XlaBuilder* const builder = input.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input)); int last_dim = input_shape.dimensions_size() - 1; int64_t last_dim_size = input_shape.dimensions(last_dim); const int64_t kPerPartitionSize = 8192; const int64_t kLastDimSizeThreshold = 524288; const int64_t kMinNumPartitions = 8; const int64_t kMinimalK = 1000; if ((k >= kMinimalK) && (k < kPerPartitionSize) && (kPerPartitionSize / k > 2) && last_dim_size >= kLastDimSizeThreshold) { int64_t num_partitions = CeilOfRatio(last_dim_size - k, kPerPartitionSize - k); if (num_partitions >= kMinNumPartitions) { return TopKWithPartitions(input, k, num_partitions, index_type); } } Shape iota_shape = ShapeUtil::MakeShape(index_type, input_shape.dimensions()); XlaOp iota = Iota(builder, iota_shape, last_dim); for (int64_t i = 0; i < input_shape.rank(); ++i) { if (input_shape.is_dynamic_dimension(i)) { iota = SetDimensionSize(iota, GetDimensionSize(input, i), i); } } auto input_dims = input_shape.dimensions(); constexpr int32_t kLow16BitsLimit = int32_t{1} << 16; constexpr int32_t kLow16BitsMask = kLow16BitsLimit - 1; constexpr int32_t kHigh16BitsMask = ~kLow16BitsMask; constexpr int kMaxLastDimSizeForSmallBatches = 1500; constexpr int kSmallBatchSizeThreshold = 8; const bool use_packed_bf16_sort = (input_shape.element_type() == BF16 && last_dim_size < kLow16BitsLimit && (last_dim_size < kMaxLastDimSizeForSmallBatches || (input_shape.rank() == 2 && input_shape.dimensions(0) >= kSmallBatchSizeThreshold))); std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0); std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end()); limit_indices[last_dim] = k; std::vector<int64_t> strides(input_shape.dimensions_size(), 1); XlaOp values; XlaOp indices; if (use_packed_bf16_sort) { auto sign_magnitude_to_from_ones_complement = [builder](const XlaOp in) { constexpr int32_t kAllNonSignBits = 0x7fffffff; XlaOp in_s32 = BitcastConvertType(in, S32); return Xor( And(in_s32, ConstantR0<int32_t>(builder, kAllNonSignBits)), ShiftRightArithmetic(in_s32, ConstantR0<int32_t>(builder, 31))); }; XlaOp input_f32_trimmed = Or(sign_magnitude_to_from_ones_complement( BitcastConvertType(ConvertElementType(input, F32), S32)), ConstantR0<int32_t>(builder, kLow16BitsMask)); XlaOp input_and_iota = Xor(input_f32_trimmed, iota); XlaOp sort_result_raw = Sort({input_and_iota}, CreateScalarGtComputation({index_type}, builder), last_dim, false); sort_result_raw = Slice(sort_result_raw, start_indices, limit_indices, strides); sort_result_raw = RemoveDynamicDimension(sort_result_raw, last_dim); values = ConvertElementType( BitcastConvertType( And(sign_magnitude_to_from_ones_complement(sort_result_raw), ConstantR0<int32_t>(builder, kHigh16BitsMask)), F32), BF16); indices = And( Xor(sort_result_raw, ConstantR0<int32_t>(builder, kLow16BitsMask)), ConstantR0<int32_t>(builder, kLow16BitsMask)); } else { XlaOp sort_result = Sort({input, iota}, CreateScalarGtComputation( {input_shape.element_type(), index_type}, iota.builder()), last_dim, true); values = 
Slice(GetTupleElement(sort_result, 0), start_indices, limit_indices, strides); values = RemoveDynamicDimension(values, last_dim); indices = Slice(GetTupleElement(sort_result, 1), start_indices, limit_indices, strides); indices = RemoveDynamicDimension(indices, last_dim); } return Tuple(builder, {values, indices}); }); } XlaOp TopKWithPartitions(XlaOp input, int64_t k, int64_t num_partitions, PrimitiveType index_type) { XlaBuilder* const builder = input.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input)); int last_dim = input_shape.dimensions_size() - 1; auto input_dims = input_shape.dimensions(); int64_t last_dim_size = input_shape.dimensions(last_dim); const int64_t per_partition_size = CeilOfRatio(last_dim_size, num_partitions); if (k >= per_partition_size) { return TopK(input, k, index_type); } Shape iota_shape = ShapeUtil::MakeShape(index_type, input_shape.dimensions()); XlaOp iota = Iota(builder, iota_shape, last_dim); for (int64_t i = 0; i < input_shape.rank(); ++i) { if (input_shape.is_dynamic_dimension(i)) { iota = SetDimensionSize(iota, GetDimensionSize(input, i), i); } } auto topk_body_fn = [&](XlaOp partition, absl::Span<const XlaOp> values_and_indices, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { auto values = values_and_indices[0]; auto indices = values_and_indices[1]; auto input = values_and_indices[2]; auto iota = values_and_indices[3]; XlaOp start = Mul(Add(partition, One(builder, index_type)), ConstantR0WithType(builder, index_type, per_partition_size)); XlaOp sliced_input = DynamicSliceInMinorDims(input, {start}, {per_partition_size}); XlaOp sliced_indices = DynamicSliceInMinorDims(iota, {start}, {per_partition_size}); sliced_input = ConcatInDim(builder, {values, sliced_input}, last_dim); sliced_indices = ConcatInDim(builder, {indices, sliced_indices}, last_dim); XlaOp sort_result = Sort( {sliced_input, sliced_indices}, CreateScalarGtComputation({input_shape.element_type(), index_type}, sliced_indices.builder()), last_dim, true); std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0); std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end()); std::vector<int64_t> strides(input_shape.dimensions_size(), 1); start_indices[last_dim] = 0; limit_indices[last_dim] = k; values = Slice(GetTupleElement(sort_result, 0), start_indices, limit_indices, strides); indices = Slice(GetTupleElement(sort_result, 1), start_indices, limit_indices, strides); return std::vector<XlaOp>{values, indices, input, iota}; }; std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0); std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end()); std::vector<int64_t> strides(input_shape.dimensions_size(), 1); start_indices[last_dim] = 0; limit_indices[last_dim] = per_partition_size; XlaOp sliced_input = Slice(input, start_indices, limit_indices, strides); XlaOp sliced_indices = Slice(iota, start_indices, limit_indices, strides); XlaOp sort_result = Sort({sliced_input, sliced_indices}, CreateScalarGtComputation({input_shape.element_type(), index_type}, sliced_indices.builder()), last_dim, true); start_indices[last_dim] = 0; limit_indices[last_dim] = k; XlaOp values = Slice(GetTupleElement(sort_result, 0), start_indices, limit_indices, strides); XlaOp indices = Slice(GetTupleElement(sort_result, 1), start_indices, limit_indices, strides); TF_ASSIGN_OR_RETURN( auto values_and_indices, ForEachIndex(num_partitions - 1, index_type, topk_body_fn, 
{values, indices, input, iota}, "topk_with_partition", builder)); return Tuple(builder, {values_and_indices[0], values_and_indices[1]}); }); } }
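TopKWithPartitions never materializes a full sort of the input: it keeps a running k-element candidate set and folds each partition into it, which is the shape of the ForEachIndex loop above. A plain-C++ sketch of that reduction (illustrative only, no XLA):

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// Partitioned top-k: fold each partition into a running k-element candidate
// set, mirroring the loop body in TopKWithPartitions.
std::vector<float> TopKPartitioned(const std::vector<float>& input, size_t k,
                                   size_t num_partitions) {
  size_t per_partition = (input.size() + num_partitions - 1) / num_partitions;
  std::vector<float> candidates;
  for (size_t start = 0; start < input.size(); start += per_partition) {
    size_t end = std::min(start + per_partition, input.size());
    candidates.insert(candidates.end(), input.begin() + start,
                      input.begin() + end);
    // Keep only the k largest candidates (full sort, like the XLA version).
    std::sort(candidates.begin(), candidates.end(), std::greater<float>());
    if (candidates.size() > k) candidates.resize(k);
  }
  return candidates;
}

int main() {
  std::vector<float> x = {0, 1, 2, 3, 4, 5, 6, 7};
  for (float v : TopKPartitioned(x, 3, 2)) std::cout << v << " ";  // 7 6 5
  std::cout << "\n";
  return 0;
}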
#include "xla/client/lib/sorting.h" #include <algorithm> #include <functional> #include <limits> #include <random> #include <vector> #include "xla/client/xla_builder.h" #include "xla/test.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/test_macros.h" #include "xla/types.h" namespace xla { namespace { using SortingTest = ClientLibraryTestBase; XLA_TEST_F(SortingTest, TopK3From8Values) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopK(x, 3), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopK(x_rev, 3), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Int16Indices) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopK(x, 3, PrimitiveType::S16), 1); ComputeAndCompareR1<int16_t>(&builder, {7, 6, 5}, {}); } XLA_TEST_F(SortingTest, TopKFullSortMinInt) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<int>(&builder, {std::numeric_limits<int>::min(), std::numeric_limits<int>::min() + 1, std::numeric_limits<int>::max()}); xla::GetTupleElement(xla::TopK(x_rev, 3), 1); ComputeAndCompareR1<int>(&builder, {2, 1, 0}, {}); } XLA_TEST_F(SortingTest, TopKFullSort) { XlaBuilder builder(TestName()); const int kSize = 16; std::mt19937 eng; std::uniform_real_distribution<float> u_dist(0.0, 100.0); auto gen = std::bind(u_dist, eng); std::vector<float> inputs(kSize); std::generate(inputs.begin(), inputs.end(), gen); auto x = ConstantR1<float>(&builder, inputs); xla::GetTupleElement(xla::TopK(x, kSize), 0); absl::c_sort(inputs, std::greater<float>()); ComputeAndCompareR1<float>(&builder, inputs, {}); } XLA_TEST_F(SortingTest, TopKFullSortWithDuplicates) { XlaBuilder builder(TestName()); XlaOp a; auto a_data = CreateR1Parameter<int>({1, 1, 2, 2, 1}, 0, "a", &builder, &a); xla::GetTupleElement(xla::TopK(a, 5), 1); ComputeAndCompareR1<int>(&builder, {2, 3, 0, 1, 4}, {a_data.get()}); } XLA_TEST_F(SortingTest, TopK3From8Values2Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 2), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices2Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 2), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Values3Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 3), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices3Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 3), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Values5Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 
5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 5), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, DISABLED_TopKLargeInput) { XlaBuilder builder(TestName()); Array<float> input({2, 1000000}); input.FillRandom(1.0f, 2.0f); auto x = CreateConstantFromLiteral(LiteralUtil::CreateFromArray(input), &builder); Array2D<float> expected_array(2, 1000); expected_array.Fill(2.0f); xla::GetTupleElement(xla::TopK(x, 1000), 0); ErrorSpec error_spec(10.0f, 10.0f); ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec); } XLA_TEST_F(SortingTest, TopK3From8Indices5Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 5), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Int16Indices5Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 5, PrimitiveType::S16), 1); ComputeAndCompareR1<int16_t>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopKFullSortWithDuplicates2Partitions) { XlaBuilder builder(TestName()); XlaOp a; auto a_data = CreateR1Parameter<int>({1, 1, 2, 2, 1}, 0, "a", &builder, &a); xla::GetTupleElement(xla::TopKWithPartitions(a, 3, 2), 1); ComputeAndCompareR1<int>(&builder, {2, 3, 0}, {a_data.get()}); } } }
XlaOp TopKWithPartitions(XlaOp input, int64_t k, int64_t num_partitions, PrimitiveType index_type) { XlaBuilder* const builder = input.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input)); int last_dim = input_shape.dimensions_size() - 1; auto input_dims = input_shape.dimensions(); int64_t last_dim_size = input_shape.dimensions(last_dim); const int64_t per_partition_size = CeilOfRatio(last_dim_size, num_partitions); if (k >= per_partition_size) { return TopK(input, k, index_type); } Shape iota_shape = ShapeUtil::MakeShape(index_type, input_shape.dimensions()); XlaOp iota = Iota(builder, iota_shape, last_dim); for (int64_t i = 0; i < input_shape.rank(); ++i) { if (input_shape.is_dynamic_dimension(i)) { iota = SetDimensionSize(iota, GetDimensionSize(input, i), i); } } auto topk_body_fn = [&](XlaOp partition, absl::Span<const XlaOp> values_and_indices, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { auto values = values_and_indices[0]; auto indices = values_and_indices[1]; auto input = values_and_indices[2]; auto iota = values_and_indices[3]; XlaOp start = Mul(Add(partition, One(builder, index_type)), ConstantR0WithType(builder, index_type, per_partition_size)); XlaOp sliced_input = DynamicSliceInMinorDims(input, {start}, {per_partition_size}); XlaOp sliced_indices = DynamicSliceInMinorDims(iota, {start}, {per_partition_size}); sliced_input = ConcatInDim(builder, {values, sliced_input}, last_dim); sliced_indices = ConcatInDim(builder, {indices, sliced_indices}, last_dim); XlaOp sort_result = Sort( {sliced_input, sliced_indices}, CreateScalarGtComputation({input_shape.element_type(), index_type}, sliced_indices.builder()), last_dim, true); std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0); std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end()); std::vector<int64_t> strides(input_shape.dimensions_size(), 1); start_indices[last_dim] = 0; limit_indices[last_dim] = k; values = Slice(GetTupleElement(sort_result, 0), start_indices, limit_indices, strides); indices = Slice(GetTupleElement(sort_result, 1), start_indices, limit_indices, strides); return std::vector<XlaOp>{values, indices, input, iota}; }; std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0); std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end()); std::vector<int64_t> strides(input_shape.dimensions_size(), 1); start_indices[last_dim] = 0; limit_indices[last_dim] = per_partition_size; XlaOp sliced_input = Slice(input, start_indices, limit_indices, strides); XlaOp sliced_indices = Slice(iota, start_indices, limit_indices, strides); XlaOp sort_result = Sort({sliced_input, sliced_indices}, CreateScalarGtComputation({input_shape.element_type(), index_type}, sliced_indices.builder()), last_dim, true); start_indices[last_dim] = 0; limit_indices[last_dim] = k; XlaOp values = Slice(GetTupleElement(sort_result, 0), start_indices, limit_indices, strides); XlaOp indices = Slice(GetTupleElement(sort_result, 1), start_indices, limit_indices, strides); TF_ASSIGN_OR_RETURN( auto values_and_indices, ForEachIndex(num_partitions - 1, index_type, topk_body_fn, {values, indices, input, iota}, "topk_with_partition", builder)); return Tuple(builder, {values_and_indices[0], values_and_indices[1]}); }); }
XLA_TEST_F(SortingTest, TopK3From8Values2Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 2), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices2Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 2), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Values3Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 3), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices3Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 3), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Values5Partitions) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}); xla::GetTupleElement(xla::TopKWithPartitions(x, 3, 5), 0); ComputeAndCompareR1<float>(&builder, {7.0, 6.0, 5.0}, {}); } XLA_TEST_F(SortingTest, TopK3From8Indices5Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 5), 1); ComputeAndCompareR1<int>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopK3From8Int16Indices5Partitions) { XlaBuilder builder(TestName()); auto x_rev = ConstantR1<float>(&builder, {7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0}); xla::GetTupleElement(xla::TopKWithPartitions(x_rev, 3, 5, PrimitiveType::S16), 1); ComputeAndCompareR1<int16_t>(&builder, {0, 1, 2}, {}); } XLA_TEST_F(SortingTest, TopKFullSortWithDuplicates2Partitions) { XlaBuilder builder(TestName()); XlaOp a; auto a_data = CreateR1Parameter<int>({1, 1, 2, 2, 1}, 0, "a", &builder, &a); xla::GetTupleElement(xla::TopKWithPartitions(a, 3, 2), 1); ComputeAndCompareR1<int>(&builder, {2, 3, 0}, {a_data.get()}); }
#include "tensorflow/core/profiler/convert/dcn_analysis.h" #include <algorithm> #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/string_view.h" #include "tensorflow/core/profiler/convert/dcn_utils.h" #include "tensorflow/core/profiler/utils/xplane_builder.h" #include "tsl/profiler/utils/math_utils.h" #include "tsl/profiler/utils/tpu_xplane_utils.h" #include "tsl/profiler/utils/xplane_schema.h" namespace tensorflow { namespace profiler { using tsl::profiler::kMaxCollectivesToDisplay; using tsl::profiler::kMegaScaleDcnReceive; using tsl::profiler::LineIdType; using tsl::profiler::MicroToNano; void DcnBurstManager::ResetBurstState() { active_burst_messages_ = 0; straggler_idx_ = 0; active_burst_.num_messages = 0; active_burst_.max_overlapping_messages = 0; active_burst_.start_timestamp_ns = 0; active_burst_.end_timestamp_ns = 0; active_burst_.burst_size_bytes = 0; } void DcnBurstManager::CreateBursts(const TimestampMap& tm_events) { ResetBurstState(); for (const auto& tm_event : tm_events) { if (active_burst_messages_ < 0) { LOG_FIRST_N(WARNING, 10) << "Negative messages in burst, bursts will be incorrect."; } if (active_burst_messages_ == 0) { active_burst_.start_timestamp_ns = tm_event.first; } active_burst_messages_ += tm_event.second->message_diff; if (tm_event.second->message_diff > 0) { active_burst_.num_messages += tm_event.second->message_diff; active_burst_.burst_size_bytes += tm_event.second->size_diff; } else { Straggler straggler = {tm_event.second->duration_ns, tm_event.second->timestamp_ns, tm_event.second->size_diff * (-1), tm_event.second->src_slice_id}; active_burst_.stragglers[straggler_idx_] = straggler; straggler_idx_ = (straggler_idx_ + 1) % kMaxStragglersPerBurst; } active_burst_.max_overlapping_messages = std::max(active_burst_.max_overlapping_messages, static_cast<uint64_t>(active_burst_messages_)); if (active_burst_messages_ == 0) { active_burst_.end_timestamp_ns = tm_event.first; total_latency_ += (active_burst_.end_timestamp_ns - active_burst_.start_timestamp_ns); bursts_.emplace_back(std::move(active_burst_)); ResetBurstState(); } } } DcnEventsProcessor::DcnEventsProcessor(uint32_t num_tpu_tensor_cores, bool is_megacore) : num_tpu_tensor_cores_(num_tpu_tensor_cores), is_megacore_(is_megacore) { registered_dcn_messages_.push_back(kMegaScaleDcnReceive); tpu_collective_ts_map_.resize(num_tpu_tensor_cores_); tpu_collective_bursts_.resize(num_tpu_tensor_cores_); } void DcnEventsProcessor::SetupMessageInfo(const XPlaneVisitor& plane) { plane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) { if (std::find(registered_dcn_messages_.begin(), registered_dcn_messages_.end(), event_metadata.Name()) != registered_dcn_messages_.end()) { megascale_msg_[event_metadata.Name()] = event_metadata.Id(); } }); } uint32_t DcnEventsProcessor::FindTpuIdx(int tpu) { uint32_t num_tpus = num_tpu_tensor_cores_; if (is_megacore_) { num_tpus /= 2; } uint32_t tpu_idx = tpu % num_tpus; if (is_megacore_) { tpu_idx = tpu_idx * 2; } return tpu_idx; } void DcnEventsProcessor::GenerateTimestampEvents( const DcnMessage& dcn_message) { std::shared_ptr<TimestampEvent> start_event( new TimestampEvent{dcn_message.start_timestamp_ns, 0, 1, dcn_message.size_bytes, dcn_message.slice_src}); std::shared_ptr<TimestampEvent> end_event(new TimestampEvent{ dcn_message.end_timestamp_ns, static_cast<uint64_t>(MicroToNano(dcn_message.duration_us)), -1, -1 * dcn_message.size_bytes, dcn_message.slice_src}); std::pair<uint64_t, 
std::shared_ptr<TimestampEvent>> start_event_entry = std::make_pair(dcn_message.start_timestamp_ns, start_event); std::pair<uint64_t, std::shared_ptr<TimestampEvent>> end_event_entry = std::make_pair(dcn_message.end_timestamp_ns, end_event); host_ts_map_.insert(start_event_entry); host_ts_map_.insert(end_event_entry); const std::string& collective_name = dcn_message.collective_name; uint32_t tpu_idx = FindTpuIdx(dcn_message.tpu_dst); auto& m = tpu_collective_ts_map_[tpu_idx][collective_name]; m.insert(start_event_entry); m.insert(end_event_entry); } void DcnEventsProcessor::PrintTimestampEvents() { for (const auto& host_ts : host_ts_map_) { LOG(INFO) << host_ts.first << ": " << host_ts.second->timestamp_ns << " " << host_ts.second->duration_ns << " " << host_ts.second->message_diff << " " << host_ts.second->size_diff << " " << host_ts.second->src_slice_id; } for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) { LOG(INFO) << "TPU: " << tpu_idx; for (const auto& col_id : tpu_collective_ts_map_[tpu_idx]) { LOG(INFO) << col_id.first; for (const auto& tpu_col_ts : tpu_collective_ts_map_[tpu_idx][col_id.first]) { LOG(INFO) << tpu_col_ts.first << ": " << tpu_col_ts.second->timestamp_ns << " " << tpu_col_ts.second->duration_ns << " " << tpu_col_ts.second->message_diff << " " << tpu_col_ts.second->size_diff << " " << tpu_col_ts.second->src_slice_id; } } } } uint32_t DcnEventsProcessor::NumCollectivesQualified( const std::vector<uint64_t>& latencies) { uint32_t num_collectives_qualified = 0; uint32_t max_collectives = kMaxCollectivesToDisplay - 1; for (const auto& lat : latencies) { if (lat < host_dcn_bursts_.TotalLatency() * 0.05) { return num_collectives_qualified; } else if (lat < host_dcn_bursts_.TotalLatency() * 0.2 && num_collectives_qualified >= (max_collectives / 2)) { return num_collectives_qualified; } else if (num_collectives_qualified >= max_collectives) { return num_collectives_qualified; } else { num_collectives_qualified++; } } return latencies.size(); } void DcnEventsProcessor::QualifyCollectives() { for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) { std::vector<uint64_t> latency_to_order; latency_to_order.reserve(tpu_collective_bursts_[tpu_idx].size()); for (const auto& col_info : tpu_collective_bursts_[tpu_idx]) { latency_to_order.emplace_back(col_info.second.TotalLatency()); } std::sort(latency_to_order.begin(), latency_to_order.end(), std::greater<uint64_t>()); uint32_t num_collectives_qualified = NumCollectivesQualified(latency_to_order); if (num_collectives_qualified > 0) { uint32_t min_latency_to_qualify = latency_to_order[num_collectives_qualified - 1]; uint32_t col_num = 0; for (auto& col_info : tpu_collective_bursts_[tpu_idx]) { if (col_info.second.TotalLatency() >= min_latency_to_qualify) { col_info.second.SetToDisplay(true); if (++col_num == kMaxCollectivesToDisplay - 1) break; } } } } } void DcnEventsProcessor::GenerateBursts() { host_dcn_bursts_.CreateBursts(host_ts_map_); host_dcn_bursts_.SetToDisplay(true); for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) { for (const auto& col_info : tpu_collective_ts_map_[tpu_idx]) { tpu_collective_bursts_[tpu_idx][col_info.first].CreateBursts( tpu_collective_ts_map_[tpu_idx][col_info.first]); } } QualifyCollectives(); } void DcnEventsProcessor::ProcessReceiveMessages(const XPlaneVisitor& plane) { plane.ForEachLine([&](const XLineVisitor& line) { uint32_t recv_msg_id = megascale_msg_[kMegaScaleDcnReceive]; line.ForEachEvent([&](const XEventVisitor& event) { if (event.Id() == 
recv_msg_id) { DcnMessage dcn_message = GetDcnMessageFromXEvent(event); if (dcn_message.validity_info == DCN_MESSAGE_VALID) { GenerateTimestampEvents(dcn_message); } received_messages_.emplace_back(std::move(dcn_message)); } }); }); GenerateBursts(); } absl::string_view DcnEventsProcessor::GetBwInfo(bool is_per_tpu, const DcnBurst& burst, float& burst_mean_bw, float& burst_bw_utilization) { absl::string_view bw_level; uint32_t bw_divider = 1; burst_mean_bw = static_cast<float>(burst.burst_size_bytes) / (burst.end_timestamp_ns - burst.start_timestamp_ns); if (is_per_tpu) { bw_divider = num_tpu_tensor_cores_; if (is_megacore_) { bw_divider /= 2; } } if (burst_mean_bw < kLimitLowHostDcnBw / bw_divider) { bw_level = "Low BW"; } else if (burst_mean_bw < kLimitMedHostDcnBw / bw_divider) { bw_level = "Med BW"; } else { bw_level = "High BW"; } burst_bw_utilization = burst_mean_bw / (kMaxHostDcnBw / bw_divider); return bw_level; } void DcnEventsProcessor::AddHostDcnTrafficToXPlane(XPlane* host_xplane) { if (!host_dcn_bursts_.ToDisplay()) return; XPlaneBuilder plane_builder(host_xplane); XLineBuilder line = plane_builder.GetOrCreateLine(LineIdType::kDcnHostTraffic); line.SetNameIfEmpty("DCN Host Bandwidth"); line.SetTimestampNs(0); XStatMetadata* bw_stat_metadata = plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)"); XStatMetadata* bw_util_stat_metadata = plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization"); XStatMetadata* num_msg_stat_metadata = plane_builder.GetOrCreateStatMetadata("Total Messages"); XStatMetadata* max_overlap_msg_stat_metadata = plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages"); XStatMetadata* avg_msg_size_stat_metadata = plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)"); for (const auto& host_burst : host_dcn_bursts_.GetBursts()) { float burst_mean_bw, bw_utilization; absl::string_view bw_level = GetBwInfo(false, host_burst, burst_mean_bw, bw_utilization); XEventMetadata* event_metadata = plane_builder.GetOrCreateEventMetadata(bw_level); XEventBuilder event = line.AddEvent(*event_metadata); event.SetOffsetNs(host_burst.start_timestamp_ns); event.SetDurationNs(host_burst.end_timestamp_ns - host_burst.start_timestamp_ns); event.ParseAndAddStatValue(*bw_stat_metadata, std::to_string(burst_mean_bw)); event.ParseAndAddStatValue(*bw_util_stat_metadata, std::to_string(bw_utilization)); event.AddStatValue(*num_msg_stat_metadata, host_burst.num_messages); event.AddStatValue(*max_overlap_msg_stat_metadata, host_burst.max_overlapping_messages); uint32_t avg_message_size = host_burst.burst_size_bytes / host_burst.num_messages; event.AddStatValue(*avg_msg_size_stat_metadata, avg_message_size); } } void DcnEventsProcessor::AddUnqualifiedCollectivesToXPlane( XPlaneBuilder& plane_builder, uint32_t tpu_idx) { XLineBuilder line = plane_builder.GetOrCreateLine(LineIdType::kDcnCollectiveTrafficMax); line.SetNameIfEmpty("Remaining collectives"); line.SetTimestampNs(0); for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) { if (col_item.second.ToDisplay()) continue; for (const auto& col_burst : col_item.second.GetBursts()) { XEventMetadata* straggler_event_metadata = plane_builder.GetOrCreateEventMetadata(col_item.first); uint32_t stragglers_processed = 0; XStatMetadata* straggler_src_slice_stat_metadata = plane_builder.GetOrCreateStatMetadata("Source slice"); XStatMetadata* straggler_duration_ns_stat_metadata = plane_builder.GetOrCreateStatMetadata("Duration ns"); XStatMetadata* straggler_send_time_ns_stat_metadata = 
plane_builder.GetOrCreateStatMetadata("Send timestamp ns"); XStatMetadata* straggler_recv_time_ns_stat_metadata = plane_builder.GetOrCreateStatMetadata("Recv timestamp ns"); for (const auto& straggler : col_burst.stragglers) { XEventBuilder straggler_event = line.AddEvent(*straggler_event_metadata); straggler_event.SetOffsetNs(straggler.end_timestamp_ns - 10000); straggler_event.SetDurationNs(10000); straggler_event.AddStatValue(*straggler_src_slice_stat_metadata, straggler.src_slice_id); straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata, straggler.duration_ns); straggler_event.AddStatValue( *straggler_send_time_ns_stat_metadata, straggler.end_timestamp_ns - straggler.duration_ns); straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata, straggler.end_timestamp_ns); if (++stragglers_processed >= col_burst.num_messages) break; } } } } void DcnEventsProcessor::AddQualifiedCollectivesToXPlane( XPlaneBuilder& plane_builder, uint32_t tpu_idx) { uint32_t total_collectives = 0; for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) { if (!col_item.second.ToDisplay()) continue; const std::string& col_name = col_item.first; XLineBuilder line = plane_builder.GetOrCreateLine( LineIdType::kDcnCollectiveTraffic + total_collectives++); line.SetNameIfEmpty(col_name); line.SetTimestampNs(0); XStatMetadata* bw_stat_metadata = plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)"); XStatMetadata* bw_util_stat_metadata = plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization"); XStatMetadata* num_msg_stat_metadata = plane_builder.GetOrCreateStatMetadata("Total Messages"); XStatMetadata* max_overlap_msg_stat_metadata = plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages"); XStatMetadata* avg_msg_size_stat_metadata = plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)"); XStatMetadata* straggler_details_metadata = plane_builder.GetOrCreateStatMetadata("Straggler info:"); XStatMetadata* straggler_src_slice_stat_metadata = plane_builder.GetOrCreateStatMetadata("Source slice"); XStatMetadata* straggler_duration_ns_stat_metadata = plane_builder.GetOrCreateStatMetadata("Duration ns"); XStatMetadata* straggler_send_time_ns_stat_metadata = plane_builder.GetOrCreateStatMetadata("Send timestamp ns"); XStatMetadata* straggler_recv_time_ns_stat_metadata = plane_builder.GetOrCreateStatMetadata("Recv timestamp ns"); for (const auto& col_burst : col_item.second.GetBursts()) { float burst_mean_bw, bw_utilization; absl::string_view bw_level = GetBwInfo(true, col_burst, burst_mean_bw, bw_utilization); XEventMetadata* event_metadata = plane_builder.GetOrCreateEventMetadata(bw_level); XEventBuilder event = line.AddEvent(*event_metadata); event.SetOffsetNs(col_burst.start_timestamp_ns); event.SetDurationNs(col_burst.end_timestamp_ns - col_burst.start_timestamp_ns); event.ParseAndAddStatValue(*bw_stat_metadata, std::to_string(burst_mean_bw)); event.ParseAndAddStatValue(*bw_util_stat_metadata, std::to_string(bw_utilization)); event.AddStatValue(*num_msg_stat_metadata, col_burst.num_messages); event.AddStatValue(*max_overlap_msg_stat_metadata, col_burst.max_overlapping_messages); event.AddStatValue(*avg_msg_size_stat_metadata, col_burst.burst_size_bytes / col_burst.num_messages); XEventMetadata* straggler_event_metadata = plane_builder.GetOrCreateEventMetadata("Straggler"); uint32_t stragglers_processed = 0; std::string straggler_details = "Stragglers:\n"; for (const auto& straggler : col_burst.stragglers) { if (straggler.end_timestamp_ns == 
col_burst.end_timestamp_ns) { XEventBuilder straggler_event = line.AddEvent(*straggler_event_metadata); straggler_event.SetOffsetNs(straggler.end_timestamp_ns - straggler.duration_ns); straggler_event.SetDurationNs(straggler.duration_ns); straggler_event.AddStatValue(*straggler_src_slice_stat_metadata, straggler.src_slice_id); straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata, straggler.duration_ns); straggler_event.AddStatValue( *straggler_send_time_ns_stat_metadata, straggler.end_timestamp_ns - straggler.duration_ns); straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata, straggler.end_timestamp_ns); } straggler_details += " Src slice: " + std::to_string(straggler.src_slice_id) + " -- Duration (ns): " + std::to_string(straggler.duration_ns) + " -- [Send Timestamp, Recv Timestamp]: [" + std::to_string(straggler.end_timestamp_ns - straggler.duration_ns) + ", " + std::to_string(straggler.end_timestamp_ns) + "]\n"; if (++stragglers_processed >= col_burst.num_messages) break; } event.AddStatValue(*straggler_details_metadata, straggler_details); } } } void DcnEventsProcessor::AddTpuCollectiveDcnTrafficToXPlane( XPlane* device_xplane) { XPlaneBuilder plane_builder(device_xplane); auto tpu = tsl::profiler::GetTensorCoreId(plane_builder.Name()); if (!tpu.has_value()) return; uint32_t tpu_idx = FindTpuIdx(tpu.value()); AddQualifiedCollectivesToXPlane(plane_builder, tpu_idx); AddUnqualifiedCollectivesToXPlane(plane_builder, tpu_idx); } } }
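DcnBurstManager::CreateBursts is a sweep over timestamp-ordered +1/-1 message deltas: a burst opens when the in-flight count leaves zero and closes when it returns to zero, recording the peak overlap along the way. A standalone sketch of that sweep, simplified to drop stragglers and byte counts:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct Burst {
  uint64_t start_ns = 0, end_ns = 0;
  int max_overlapping = 0;
};

// Sweep timestamp -> message_diff events in order, tracking how many
// messages are in flight; each return to zero closes one burst.
std::vector<Burst> CreateBursts(const std::map<uint64_t, int>& deltas) {
  std::vector<Burst> bursts;
  Burst active;
  int in_flight = 0;
  for (const auto& [ts, diff] : deltas) {
    if (in_flight == 0) active.start_ns = ts;
    in_flight += diff;
    active.max_overlapping = std::max(active.max_overlapping, in_flight);
    if (in_flight == 0) {
      active.end_ns = ts;
      bursts.push_back(active);
      active = Burst{};
    }
  }
  return bursts;
}

int main() {
  // Two messages overlap in [75us, 100us]; a third runs alone later.
  std::map<uint64_t, int> deltas = {{50000, +1},  {75000, +1},  {100000, -1},
                                    {125000, -1}, {150000, +1}, {225000, -1}};
  for (const Burst& b : CreateBursts(deltas)) {
    std::cout << "burst [" << b.start_ns << ", " << b.end_ns
              << "] max_overlap=" << b.max_overlapping << "\n";
  }
  return 0;
}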
#include "tensorflow/core/profiler/convert/dcn_analysis.h" #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/core/profiler/convert/dcn_utils.h" #include "tsl/profiler/utils/tf_xplane_visitor.h" #include "tsl/profiler/utils/xplane_schema.h" namespace tensorflow { namespace profiler { namespace { using tensorflow::profiler::DCN_MESSAGE_INVALID_BAD_KEY; using tensorflow::profiler::DCN_MESSAGE_INVALID_CLOCK_SKEW; using tensorflow::profiler::DCN_MESSAGE_VALID; using tensorflow::profiler::DCN_MESSAGE_VALID_LOOPBACK; using tensorflow::profiler::XEventBuilder; using tensorflow::profiler::XEventMetadata; using tensorflow::profiler::XLineBuilder; using tensorflow::profiler::XPlane; using tensorflow::profiler::XPlaneBuilder; using tensorflow::profiler::XPlaneVisitor; using tensorflow::profiler::XSpace; using ::testing::FieldsAre; using tsl::profiler::kMegaScaleDcnReceive; using tsl::profiler::kMegaScaleDcnSend; TEST(DcnAnalysis, SetupMessageInfoTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder host_trace_builder(host_trace); XEventMetadata *event_metadata_1 = host_trace_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XEventMetadata *event_metadata_2 = host_trace_builder.GetOrCreateEventMetadata(2); event_metadata_2->set_name(std::string(kMegaScaleDcnSend)); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor( 4, false); dcn_events_processor.SetupMessageInfo(plane); ASSERT_FALSE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnSend)); ASSERT_TRUE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnReceive)); ASSERT_FALSE(dcn_events_processor.HasDcnMessages("Another Message")); ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnReceive), 1); ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnSend), std::nullopt); } TEST(DcnAnalysis, CreateMessageTestValidMessages) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder_0 = xplane_builder.GetOrCreateLine(0); XLineBuilder xline_builder_1 = xplane_builder.GetOrCreateLine(1); XEventBuilder event_builder = xline_builder_0.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(100000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-reduce.273_312"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 24); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 50); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 32768); event_builder = xline_builder_0.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(175000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), 
"super-collective.1234"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 112); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 34); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 50); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1); event_builder = xline_builder_1.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(150000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 9); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 0); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 75); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 10); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 3); EXPECT_THAT(dcn_events_processor.GetMessage(0), FieldsAre("all-reduce.273_312", 2, 3, 1, 3, 50000, 100000, 50, 32768, 0, 24, DCN_MESSAGE_VALID)); EXPECT_THAT(dcn_events_processor.GetMessage(1), FieldsAre("super-collective.1234", 112, 1, 34, 2, 125000, 175000, 50, 1, 4, 0, DCN_MESSAGE_VALID)); EXPECT_THAT( dcn_events_processor.GetMessage(2), FieldsAre("super-collective", 9, 3, 0, 0, 75000, 150000, 75, 10, -1, -1, DCN_MESSAGE_VALID)); TimestampMap host_ts_map = dcn_events_processor.HostTsMap(); ASSERT_EQ(host_ts_map.size(), 6); for (const auto &ts_map_item : host_ts_map) { ASSERT_EQ(ts_map_item.first, ts_map_item.second->timestamp_ns); if (ts_map_item.first == 50000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 32768); } else if (ts_map_item.first == 125000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 1); } else if (ts_map_item.first == 75000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 10); } else if (ts_map_item.first == 100000) { ASSERT_EQ(ts_map_item.second->duration_ns, 50000); ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -32768); } else if (ts_map_item.first == 175000) { ASSERT_EQ(ts_map_item.second->duration_ns, 50000); ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -1); } else if (ts_map_item.first == 150000) { ASSERT_EQ(ts_map_item.second->duration_ns, 75000); 
ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -10); } else { FAIL() << "Unexpected timestamp entry."; } } const std::vector<DcnBurst> &host_bursts = dcn_events_processor.GetHostBursts(); ASSERT_EQ(host_bursts.size(), 1); ASSERT_EQ(host_bursts[0].num_messages, 3); ASSERT_EQ(host_bursts[0].start_timestamp_ns, 50000); ASSERT_EQ(host_bursts[0].end_timestamp_ns, 175000); ASSERT_EQ(host_bursts[0].burst_size_bytes, 32779); ASSERT_EQ(host_bursts[0].max_overlapping_messages, 2); } TEST(DcnAnalysis, CreateLoopBackMessageTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(5000000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-gather.1234"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 2); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 40); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 1000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1000); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 1); EXPECT_THAT(dcn_events_processor.GetMessage(0), FieldsAre("all-gather.1234", 2, 3, 2, 1, 4000000, 5000000, 1000, 1000, 4, 40, DCN_MESSAGE_VALID_LOOPBACK)); } TEST(DcnAnalysis, CreateZeroDurationMessageTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(20000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-reduce.273_312"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 25); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 0); 
event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 512); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); EXPECT_THAT( dcn_events_processor.GetMessage(0), FieldsAre("all-reduce.273_312", 2, 3, 1, 1, 20000, 20000, 0, 512, 0, 25, DCN_MESSAGE_INVALID_CLOCK_SKEW)); } TEST(DcnAnalysis, CreateMissingKeyTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(50000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 10); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 100); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); EXPECT_THAT( dcn_events_processor.GetMessage(0), FieldsAre("", -1, -1, -1, -1, 40000, 50000, 10, 100, -1, -1, DCN_MESSAGE_INVALID_BAD_KEY)); } } } }
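The burst events rendered above are labeled with a bandwidth level returned by GetBwInfo, whose body is not shown in this excerpt. A hedged sketch of how such a classifier could derive mean bandwidth and utilization from a burst; the kMaxBwGBs capacity and the bucket thresholds are assumptions for illustration only:

#include <cstdint>
#include <string>

// Illustrative burst fields, mirroring the DcnBurst members used above.
struct BurstStats {
  uint64_t start_timestamp_ns;
  uint64_t end_timestamp_ns;
  uint64_t burst_size_bytes;
};

// Assumed link capacity in GB/s; the real limit is platform dependent.
constexpr float kMaxBwGBs = 100.0f;

// Computes mean bandwidth (GB/s) and utilization, then buckets the burst
// into a coarse level string, similar in spirit to GetBwInfo above.
std::string ClassifyBurstBw(const BurstStats& burst, float& mean_bw,
                            float& utilization) {
  const uint64_t duration_ns =
      burst.end_timestamp_ns - burst.start_timestamp_ns;
  // bytes/ns equals GB/s (1e9 bytes per second per 1e9 ns).
  mean_bw = duration_ns == 0
                ? 0.0f
                : static_cast<float>(burst.burst_size_bytes) / duration_ns;
  utilization = mean_bw / kMaxBwGBs;
  if (utilization < 0.25f) return "Low BW";
  if (utilization < 0.75f) return "Med BW";
  return "High BW";
}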
void DcnEventsProcessor::GenerateTimestampEvents( const DcnMessage& dcn_message) { std::shared_ptr<TimestampEvent> start_event( new TimestampEvent{dcn_message.start_timestamp_ns, 0, 1, dcn_message.size_bytes, dcn_message.slice_src}); std::shared_ptr<TimestampEvent> end_event(new TimestampEvent{ dcn_message.end_timestamp_ns, static_cast<uint64_t>(MicroToNano(dcn_message.duration_us)), -1, -1 * dcn_message.size_bytes, dcn_message.slice_src}); std::pair<uint64_t, std::shared_ptr<TimestampEvent>> start_event_entry = std::make_pair(dcn_message.start_timestamp_ns, start_event); std::pair<uint64_t, std::shared_ptr<TimestampEvent>> end_event_entry = std::make_pair(dcn_message.end_timestamp_ns, end_event); host_ts_map_.insert(start_event_entry); host_ts_map_.insert(end_event_entry); const std::string& collective_name = dcn_message.collective_name; uint32_t tpu_idx = FindTpuIdx(dcn_message.tpu_dst); auto& m = tpu_collective_ts_map_[tpu_idx][collective_name]; m.insert(start_event_entry); m.insert(end_event_entry); }
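GenerateTimestampEvents records each message twice in the ordered timestamp maps: +1 and +size at the start, -1 and -size at the end. Burst generation (not shown in this excerpt) can then walk the map with a running active-message counter. A simplified standalone sketch of that aggregation, assuming a burst closes when the counter returns to zero:

#include <cstdint>
#include <map>
#include <vector>

// Minimal stand-ins for the TimestampEvent / DcnBurst fields used above.
struct SimpleEvent {
  uint64_t timestamp_ns;
  int32_t message_diff;  // +1 at message start, -1 at message end
  int64_t size_diff;     // +size at start, -size at end
};
struct SimpleBurst {
  uint64_t start_ns = 0;
  uint64_t end_ns = 0;
  uint32_t num_messages = 0;
  int64_t size_bytes = 0;
};

std::vector<SimpleBurst> BuildBursts(
    const std::multimap<uint64_t, SimpleEvent>& ts_map) {
  std::vector<SimpleBurst> bursts;
  int32_t active = 0;  // messages currently in flight
  SimpleBurst current;
  for (const auto& [ts, ev] : ts_map) {
    if (active == 0 && ev.message_diff > 0) current = {ts, ts, 0, 0};
    active += ev.message_diff;
    if (ev.message_diff > 0) {
      ++current.num_messages;
      current.size_bytes += ev.size_diff;
    }
    if (active == 0) {  // nothing in flight: the burst is over
      current.end_ns = ts;
      bursts.push_back(current);
    }
  }
  return bursts;
}

Fed with the six entries produced by the three receive messages in the tests above, this yields a single burst spanning 50000 to 175000 ns with 3 messages and 32779 bytes, matching the GetHostBursts expectations.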
TEST(DcnAnalysis, CreateMessageTestValidMessages) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder_0 = xplane_builder.GetOrCreateLine(0); XLineBuilder xline_builder_1 = xplane_builder.GetOrCreateLine(1); XEventBuilder event_builder = xline_builder_0.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(100000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-reduce.273_312"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 24); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 50); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 32768); event_builder = xline_builder_0.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(175000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective.1234"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 112); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 34); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 50); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1); event_builder = xline_builder_1.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(150000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 9); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 0); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 75); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 10); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 3); EXPECT_THAT(dcn_events_processor.GetMessage(0), 
FieldsAre("all-reduce.273_312", 2, 3, 1, 3, 50000, 100000, 50, 32768, 0, 24, DCN_MESSAGE_VALID)); EXPECT_THAT(dcn_events_processor.GetMessage(1), FieldsAre("super-collective.1234", 112, 1, 34, 2, 125000, 175000, 50, 1, 4, 0, DCN_MESSAGE_VALID)); EXPECT_THAT( dcn_events_processor.GetMessage(2), FieldsAre("super-collective", 9, 3, 0, 0, 75000, 150000, 75, 10, -1, -1, DCN_MESSAGE_VALID)); TimestampMap host_ts_map = dcn_events_processor.HostTsMap(); ASSERT_EQ(host_ts_map.size(), 6); for (const auto &ts_map_item : host_ts_map) { ASSERT_EQ(ts_map_item.first, ts_map_item.second->timestamp_ns); if (ts_map_item.first == 50000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 32768); } else if (ts_map_item.first == 125000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 1); } else if (ts_map_item.first == 75000) { ASSERT_EQ(ts_map_item.second->duration_ns, 0); ASSERT_EQ(ts_map_item.second->message_diff, 1); ASSERT_EQ(ts_map_item.second->size_diff, 10); } else if (ts_map_item.first == 100000) { ASSERT_EQ(ts_map_item.second->duration_ns, 50000); ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -32768); } else if (ts_map_item.first == 175000) { ASSERT_EQ(ts_map_item.second->duration_ns, 50000); ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -1); } else if (ts_map_item.first == 150000) { ASSERT_EQ(ts_map_item.second->duration_ns, 75000); ASSERT_EQ(ts_map_item.second->message_diff, -1); ASSERT_EQ(ts_map_item.second->size_diff, -10); } else { FAIL() << "Unexpected timestamp entry."; } } const std::vector<DcnBurst> &host_bursts = dcn_events_processor.GetHostBursts(); ASSERT_EQ(host_bursts.size(), 1); ASSERT_EQ(host_bursts[0].num_messages, 3); ASSERT_EQ(host_bursts[0].start_timestamp_ns, 50000); ASSERT_EQ(host_bursts[0].end_timestamp_ns, 175000); ASSERT_EQ(host_bursts[0].burst_size_bytes, 32779); ASSERT_EQ(host_bursts[0].max_overlapping_messages, 2); } TEST(DcnAnalysis, CreateLoopBackMessageTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(5000000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-gather.1234"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 2); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 40); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 1000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1000); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); 
DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 1); EXPECT_THAT(dcn_events_processor.GetMessage(0), FieldsAre("all-gather.1234", 2, 3, 2, 1, 4000000, 5000000, 1000, 1000, 4, 40, DCN_MESSAGE_VALID_LOOPBACK)); } TEST(DcnAnalysis, CreateZeroDurationMessageTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(20000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-reduce.273_312"); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"), 3); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1); event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata( "dcn_destination_per_slice_device_id"), 1); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 25); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 0); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 512); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); EXPECT_THAT( dcn_events_processor.GetMessage(0), FieldsAre("all-reduce.273_312", 2, 3, 1, 1, 20000, 20000, 0, 512, 0, 25, DCN_MESSAGE_INVALID_CLOCK_SKEW)); } TEST(DcnAnalysis, CreateMissingKeyTest) { XSpace space; XPlane *host_trace = space.add_planes(); XPlaneBuilder xplane_builder(host_trace); XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1); event_metadata_1->set_name(std::string(kMegaScaleDcnReceive)); XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0); XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1); event_builder.SetOffsetNs(50000); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("duration_us"), 10); event_builder.AddStatValue( *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 100); XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); DcnEventsProcessor dcn_events_processor(4, false); dcn_events_processor.SetupMessageInfo(plane); dcn_events_processor.ProcessReceiveMessages(plane); EXPECT_THAT( dcn_events_processor.GetMessage(0), FieldsAre("", -1, -1, -1, -1, 40000, 50000, 10, 100, -1, -1, DCN_MESSAGE_INVALID_BAD_KEY)); }
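The invalid and loopback tests above pin down a classification order for received messages. A sketch of that rule set as inferred from the test expectations; the real logic lives in dcn_utils and is not shown here, so treat this ordering as an assumption:

#include <cstdint>
#include <string>

enum class SimpleValidity { kValid, kValidLoopback, kInvalidClockSkew, kInvalidBadKey };

// Inferred from the tests: a missing label (bad key) dominates, a zero
// duration implies clock skew between the send and recv hosts, and matching
// source/destination slices mark intra-slice loopback traffic.
SimpleValidity ClassifyMessage(const std::string& label, int32_t slice_src,
                               int32_t slice_dst, uint64_t duration_us) {
  if (label.empty()) return SimpleValidity::kInvalidBadKey;
  if (duration_us == 0) return SimpleValidity::kInvalidClockSkew;
  if (slice_src == slice_dst) return SimpleValidity::kValidLoopback;
  return SimpleValidity::kValid;
}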
#include "tensorflow/core/common_runtime/buf_rendezvous.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/process_util.h" #include "tensorflow/core/framework/cancellation.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/notification.h" namespace tensorflow { namespace { void DeregisterCancellation(BufRendezvous::Hook* h) { if (h->cancellation_manager != nullptr) { h->cancellation_manager->DeregisterCallback(h->cancellation_token); h->cancellation_manager = nullptr; h->cancellation_token = CancellationManager::kInvalidToken; } } } BufRendezvous::~BufRendezvous() { mutex_lock l(mu_); if (!hook_table_.empty()) { PurgeTable(errors::Internal("Delete called on non-empty BufRendezvous"), &hook_table_); } } void BufRendezvous::StartAbort(const Status& s) { CHECK(!s.ok()); HookTable dummy_table; { mutex_lock l(mu_); status_.Update(StatusGroup::MakeDerived(s)); hook_table_.swap(dummy_table); } PurgeTable(s, &dummy_table); } void BufRendezvous::PurgeTable(const Status& s, HookTable* table) { for (auto& it : *table) { Hook* h = it.second; if (h->cancellation_manager != nullptr) { h->cancellation_manager->TryDeregisterCallback(h->cancellation_token); } if (h->cons_cb != nullptr) { h->cons_cb(s, nullptr); } if (h->prod_cb != nullptr) { h->prod_cb(s); } delete h; } table->clear(); } string BufRendezvous::Hook::DebugString() const { return absl::StrCat( "[dev:", (prod_dev ? prod_dev->name() : "none"), ", ctx:", reinterpret_cast<uint64>(prod_ctx), ", val:", reinterpret_cast<uint64>(prod_value), ", pcb:", prod_cb ? reinterpret_cast<uint64>(&prod_cb) : 0, ", ccb:", cons_cb ? 
reinterpret_cast<uint64>(&cons_cb) : 0, "]"); } void BufRendezvous::ProvideBuf(const string& key, Device* dev, DeviceContext* dev_ctx, const Tensor* v, const AllocatorAttributes& attr, const ProducerCallback& done, CancellationManager* cancellation_manager) { DVLOG(4) << "ProvideBuf: key = " << key; #ifndef NDEBUG if (VLOG_IS_ON(4)) { LogContents(); } #endif Hook* h = nullptr; Status providebuf_status; do { mutex_lock l(mu_); if (!status_.ok()) { providebuf_status = status_; break; } else { CancellationToken cancellation_token = CancellationManager::kInvalidToken; auto it = hook_table_.find(key); if (it == hook_table_.end()) { if (cancellation_manager != nullptr) { cancellation_token = cancellation_manager->get_cancellation_token(); } h = new Hook(cancellation_manager, cancellation_token); it = hook_table_.insert(std::make_pair(key, h)).first; } else { if (it->second->prod_cb != nullptr) { providebuf_status = errors::Internal( "BufRendezvous::ProvideBuf already called for key ", key); break; } h = it->second; } h->prod_dev = dev; h->prod_ctx = dev_ctx; h->prod_value = v; h->prod_attr = attr; h->prod_cb = done; if (h->cons_cb != nullptr) { hook_table_.erase(it); } else { if (cancellation_manager != nullptr && !cancellation_manager->RegisterCallback( cancellation_token, [this, key]() { CancelHook(key); })) { providebuf_status = errors::Cancelled( "Operation was cancelled for BufRendezvous key ", key); hook_table_.erase(it); delete h; } h = nullptr; } } } while (false); if (h) { DVLOG(4) << "ProvideBuf: key = " << key << ": calling cons_cb" << h->DebugString(); DeregisterCancellation(h); h->cons_cb(absl::OkStatus(), h); } if (!providebuf_status.ok()) { done(providebuf_status); } } void BufRendezvous::ConsumeBuf(const string& key, const string& device_name, const uint64 device_incarnation, const ConsumerCallback& done, CancellationManager* cancellation_manager) { DVLOG(4) << "ConsumeBuf: key = " << key << " device_name = " << device_name; #ifndef NDEBUG if (VLOG_IS_ON(4)) { LogContents(); } #endif Device* device; Status consumebuf_status = dev_mgr_->LookupDevice(device_name, &device); if (consumebuf_status.ok() && device->attributes().incarnation() != device_incarnation) { consumebuf_status = errors::FailedPrecondition( "RecvBuf expects a different device incarnation: ", device_incarnation, " vs. ", device->attributes().incarnation(), ". Your worker job that contains the device (\"", device_name, "\") was probably restarted. Check your " "worker job for the reason why it was restarted."); } if (!consumebuf_status.ok()) { done(consumebuf_status, nullptr); return; } Hook* existing_hook = nullptr; do { mutex_lock l(mu_); if (!status_.ok()) { consumebuf_status = status_; break; } auto it = hook_table_.find(key); if (it != hook_table_.end()) { if (it->second->cons_cb) { consumebuf_status = errors::Internal("Second consumer arrived for key ", key); break; } existing_hook = it->second; hook_table_.erase(it); existing_hook->cons_cb = done; } else { CancellationToken cancellation_token = CancellationManager::kInvalidToken; bool already_cancelled = false; if (cancellation_manager != nullptr) { cancellation_token = cancellation_manager->get_cancellation_token(); already_cancelled = !cancellation_manager->RegisterCallback( cancellation_token, [this, key]() { CancelHook(key); }); } if (already_cancelled) { consumebuf_status = errors::Cancelled( "Operation was cancelled for BufRendezvous key ", key); } else { Hook* h = new Hook(cancellation_manager, cancellation_token); h->cons_cb = done; it = hook_table_.insert(std::make_pair(key, h)).first; return; } } } while (false); if (existing_hook) { DVLOG(4) << "ConsumeBuf: key = " << key << ": calling cons_cb" << existing_hook->DebugString(); DeregisterCancellation(existing_hook); existing_hook->cons_cb(absl::OkStatus(), existing_hook); return; } if (!consumebuf_status.ok()) { done(consumebuf_status, nullptr); return; } } void BufRendezvous::CancelHook(const string& key) { Hook* h = nullptr; { mutex_lock l(mu_); auto it = hook_table_.find(key); if (it == hook_table_.end()) return; h = it->second; hook_table_.erase(it); } if (h != nullptr) { auto s = errors::Cancelled("Operation was cancelled for BufRendezvous key ", key); if (h->prod_cb != nullptr) { h->prod_cb(s); } if (h->cons_cb != nullptr) { h->cons_cb(s, nullptr); } delete h; } } void BufRendezvous::DoneWithHook(Hook* h) { h->prod_cb(absl::OkStatus()); delete h; } void BufRendezvous::LogContents() { mutex_lock l(mu_); LOG(INFO) << strings::StrCat("BufRendezvous ", strings::Hex(reinterpret_cast<uint64>(this)), " step_id=", step_id_, " current contents:"); for (const auto& it : hook_table_) { LOG(INFO) << it.first << ":" << it.second->DebugString(); } } }
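BufRendezvous pairs exactly one producer and one consumer per key: whichever side arrives first parks a Hook in hook_table_, and the second arrival completes the exchange and erases the entry. A stripped-down sketch of that rendezvous pattern without devices, statuses, or cancellation; all names here are illustrative:

#include <functional>
#include <map>
#include <string>

// One pending exchange: at most one producer value and one consumer callback.
struct MiniHook {
  const int* value = nullptr;                 // set by the producer
  std::function<void(const int*)> consumer;   // set by the consumer
};

class MiniRendezvous {
 public:
  void Provide(const std::string& key, const int* v) {
    auto it = table_.find(key);
    if (it == table_.end()) {
      table_[key].value = v;                  // producer first: park the value
    } else {
      auto cb = std::move(it->second.consumer);  // consumer was waiting
      table_.erase(it);
      cb(v);
    }
  }
  void Consume(const std::string& key, std::function<void(const int*)> done) {
    auto it = table_.find(key);
    if (it == table_.end()) {
      table_[key].consumer = std::move(done);  // consumer first: park callback
    } else {
      const int* v = it->second.value;         // producer was waiting
      table_.erase(it);
      done(v);
    }
  }
 private:
  std::map<std::string, MiniHook> table_;  // the real class guards this with mu_
};

The real class additionally keeps the hook alive until DoneWithHook so the consumer controls when the producer callback fires.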
#include "tensorflow/core/common_runtime/buf_rendezvous.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class BufRendezvousTest : public ::testing::Test { protected: static std::unique_ptr<Device> NewDevice(const string& name, const string& type, const uint64 incarnation) { class FakeDevice : public Device { public: explicit FakeDevice(const DeviceAttributes& attrs) : Device(nullptr, attrs) {} Status Sync() override { return absl::OkStatus(); } Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; } }; DeviceAttributes attrs; attrs.set_name(name); attrs.set_device_type(type); attrs.set_incarnation(incarnation); return std::make_unique<FakeDevice>(attrs); } void InitializeDevice(const string& device, const string& type, const uint64 incarnation) { std::vector<std::unique_ptr<Device>> devices; devices.push_back(NewDevice(device, type, incarnation)); dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices)); br_ = std::make_unique<BufRendezvous>(123, dev_mgr_.get()); } BufRendezvousTest() : a_(Tensor(DT_FLOAT, TensorShape({24}))), b_(Tensor(DT_FLOAT, TensorShape({24}))), fake_device_context_(reinterpret_cast<DeviceContext*>(1024LLU)) { InitializeDevice(*kDefaultDeviceName, "CPU", kDefaultIncarnation); TF_CHECK_OK(dev_mgr_->LookupDevice(*kDefaultDeviceName, &default_device_)); } Tensor a_; Tensor b_; AllocatorAttributes aa_; Device* default_device_; DeviceContext* fake_device_context_; std::unique_ptr<DeviceMgr> dev_mgr_; std::unique_ptr<BufRendezvous> br_; CancellationManager cm_; static const string* const kDefaultKey; static const string* const kDefaultDeviceName; static const uint64 kDefaultIncarnation; }; const string* const BufRendezvousTest::kDefaultKey = new string("key0"); const string* const BufRendezvousTest::kDefaultDeviceName = new string("/device:CPU:0"); const uint64 BufRendezvousTest::kDefaultIncarnation = 12345; TEST_F(BufRendezvousTest, CorrectUseProducerFirst) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_FALSE(prod_callback_called); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); EXPECT_TRUE(cons_callback_called); note.WaitForNotification(); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, 
BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); EXPECT_FALSE(cons_callback_called); br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_TRUE(cons_callback_called); note.WaitForNotification(); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, ErrorDuplicatePut) { bool prod_callback_called = false; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&prod_callback_called](const Status& s) { prod_callback_called = true; }, &cm_); Status bad_status; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&bad_status, &note](const Status& s) { bad_status = s; note.Notify(); }, &cm_); note.WaitForNotification(); EXPECT_FALSE(bad_status.ok()); EXPECT_EQ(absl::StrCat("BufRendezvous::ProvideBuf already called for key ", *kDefaultKey), bad_status.message()); EXPECT_FALSE(prod_callback_called); br_.reset(); } TEST_F(BufRendezvousTest, ErrorDeleteNonEmpty) { Status cons_status; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_status](const Status& s, BufRendezvous::Hook* h) { cons_status = s; EXPECT_EQ(h, nullptr); }, &cm_); EXPECT_TRUE(cons_status.ok()); br_.reset(); EXPECT_FALSE(cons_status.ok()); EXPECT_EQ("Delete called on non-empty BufRendezvous", cons_status.message()); } TEST_F(BufRendezvousTest, AbortNonEmpty) { Status cons_status; Status prod_status; Notification prod_note; Notification cons_note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_note.Notify(); }, &cm_); br_->ProvideBuf( "key1", default_device_, fake_device_context_, &a_, aa_, [&prod_note, &prod_status](const Status& s) { prod_status = s; prod_note.Notify(); }, &cm_); br_->StartAbort(errors::Internal("Falling sky detected")); prod_note.WaitForNotification(); cons_note.WaitForNotification(); EXPECT_FALSE(prod_status.ok()); EXPECT_EQ(prod_status.message(), "Falling sky detected"); EXPECT_FALSE(cons_status.ok()); EXPECT_EQ(cons_status.message(), "Falling sky detected"); } TEST_F(BufRendezvousTest, AbortEmpty) { br_->StartAbort(errors::Internal("Falling sky detected")); } TEST_F(BufRendezvousTest, UseAfterAbort) { br_->StartAbort(errors::Internal("Falling sky detected")); Status cons_status; Status prod_status; Notification prod_note; Notification cons_note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_note.Notify(); }, &cm_); br_->ProvideBuf( "key1", default_device_, fake_device_context_, &a_, aa_, [&prod_note, &prod_status](const Status& s) { prod_status = s; prod_note.Notify(); }, &cm_); prod_note.WaitForNotification(); cons_note.WaitForNotification(); EXPECT_FALSE(prod_status.ok()); EXPECT_NE(prod_status.message().find("Falling sky detected"), string::npos); EXPECT_FALSE(cons_status.ok()); EXPECT_NE(cons_status.message().find("Falling sky detected"), string::npos); } TEST_F(BufRendezvousTest, DeviceIncarnationMismatch) { Status cons_status; Notification note; 
br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [](const Status&) {}, nullptr); const uint64 incorrect_incarnation = 23456; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, incorrect_incarnation, [&note, &cons_status](const Status& s, BufRendezvous::Hook* h) { cons_status = s; note.Notify(); }, nullptr); note.WaitForNotification(); EXPECT_TRUE(errors::IsFailedPrecondition(cons_status)); } TEST_F(BufRendezvousTest, ProvideThenCancel) { Status status; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&status, &note](const Status& s) { status = s; note.Notify(); }, &cm_); cm_.StartCancel(); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, CancelThenProvide) { Status status; Notification note; cm_.StartCancel(); br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&status, &note](const Status& s) { status = s; note.Notify(); }, &cm_); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, ConsumeThenCancel) { Status status; Notification note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&status, &note](const Status& s, BufRendezvous::Hook* h) { status = s; note.Notify(); }, &cm_); cm_.StartCancel(); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, CancelThenConsume) { Status status; Notification note; cm_.StartCancel(); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&status, &note](const Status& s, BufRendezvous::Hook* h) { status = s; note.Notify(); }, &cm_); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_FALSE(prod_callback_called); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); note.WaitForNotification(); cm_.StartCancel(); EXPECT_TRUE(cons_callback_called); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, CancelThenProvideConsume) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; cm_.StartCancel(); br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&prod_status, 
&prod_callback_called](const Status& s) { prod_status = s; EXPECT_TRUE(errors::IsCancelled(prod_status)); prod_callback_called = true; }, &cm_); EXPECT_TRUE(prod_callback_called); EXPECT_TRUE(errors::IsCancelled(prod_status)); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; EXPECT_TRUE(errors::IsCancelled(cons_status)); cons_callback_called = true; }, &cm_); EXPECT_TRUE(cons_callback_called); EXPECT_TRUE(errors::IsCancelled(cons_status)); } } }
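Several tests above (CancelThenProvide, CancelThenConsume, CancelThenProvideConsume) hinge on CancellationManager::RegisterCallback returning false once StartCancel has run, which ProvideBuf/ConsumeBuf translate into an immediate Cancelled status. A minimal single-threaded sketch of that contract; the real CancellationManager is thread-safe and richer:

#include <functional>
#include <map>
#include <utility>

class MiniCancellationManager {
 public:
  int get_token() { return next_token_++; }
  // Returns false if cancellation already started; the caller must then fail
  // fast instead of parking work, exactly as ProvideBuf/ConsumeBuf do above.
  bool RegisterCallback(int token, std::function<void()> cb) {
    if (cancelled_) return false;
    callbacks_[token] = std::move(cb);
    return true;
  }
  bool DeregisterCallback(int token) { return callbacks_.erase(token) > 0; }
  void StartCancel() {
    cancelled_ = true;
    auto cbs = std::move(callbacks_);
    callbacks_.clear();
    for (auto& [token, cb] : cbs) cb();  // e.g. BufRendezvous::CancelHook
  }
 private:
  bool cancelled_ = false;
  int next_token_ = 0;
  std::map<int, std::function<void()>> callbacks_;
};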
void BufRendezvous::ConsumeBuf(const string& key, const string& device_name, const uint64 device_incarnation, const ConsumerCallback& done, CancellationManager* cancellation_manager) { DVLOG(4) << "ConsumeBuf: key = " << key << " device_name = " << device_name; #ifndef NDEBUG if (VLOG_IS_ON(4)) { LogContents(); } #endif Device* device; Status consumebuf_status = dev_mgr_->LookupDevice(device_name, &device); if (consumebuf_status.ok() && device->attributes().incarnation() != device_incarnation) { consumebuf_status = errors::FailedPrecondition( "RecvBuf expects a different device incarnation: ", device_incarnation, " vs. ", device->attributes().incarnation(), ". Your worker job that contains the device (\"", device_name, "\") was probably restarted. Check your " "worker job for the reason why it was restarted."); } if (!consumebuf_status.ok()) { done(consumebuf_status, nullptr); return; } Hook* existing_hook = nullptr; do { mutex_lock l(mu_); if (!status_.ok()) { consumebuf_status = status_; break; } auto it = hook_table_.find(key); if (it != hook_table_.end()) { if (it->second->cons_cb) { consumebuf_status = errors::Internal("Second consumer arrived for key ", key); break; } existing_hook = it->second; hook_table_.erase(it); existing_hook->cons_cb = done; } else { CancellationToken cancellation_token = CancellationManager::kInvalidToken; bool already_cancelled = false; if (cancellation_manager != nullptr) { cancellation_token = cancellation_manager->get_cancellation_token(); already_cancelled = !cancellation_manager->RegisterCallback( cancellation_token, [this, key]() { CancelHook(key); }); } if (already_cancelled) { consumebuf_status = errors::Cancelled( "Operation was cancelled for BufRendezvous key ", key); } else { Hook* h = new Hook(cancellation_manager, cancellation_token); h->cons_cb = done; it = hook_table_.insert(std::make_pair(key, h)).first; return; } } } while (false); if (existing_hook) { DVLOG(4) << "ConsumeBuf: key = " << key << ": calling cons_cb" << existing_hook->DebugString(); DeregisterCancellation(existing_hook); existing_hook->cons_cb(absl::OkStatus(), existing_hook); return; } if (!consumebuf_status.ok()) { done(consumebuf_status, nullptr); return; } }
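A structural detail worth noting in ConsumeBuf above: the mutex only protects the do { ... } while (false) block, and the matched hook's callback fires after the lock is dropped, which avoids deadlock if the callback re-enters the rendezvous. A tiny sketch of the idiom with illustrative names:

#include <functional>
#include <mutex>

std::mutex mu;                      // stands in for BufRendezvous::mu_
std::function<void()> parked_work;  // stands in for a matched hook's cons_cb

void CompleteExchange() {
  std::function<void()> to_run;
  do {  // single-pass loop: 'break' exits the critical section early
    std::lock_guard<std::mutex> l(mu);
    if (!parked_work) break;
    to_run = std::move(parked_work);  // detach the callback under the lock
    parked_work = nullptr;
  } while (false);
  if (to_run) to_run();  // run it with the lock released, mirroring
}                        // existing_hook->cons_cb(...) above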
TEST_F(BufRendezvousTest, CorrectUseProducerFirst) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_FALSE(prod_callback_called); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); EXPECT_TRUE(cons_callback_called); note.WaitForNotification(); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); EXPECT_FALSE(cons_callback_called); br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_TRUE(cons_callback_called); note.WaitForNotification(); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, ErrorDeleteNonEmpty) { Status cons_status; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_status](const Status& s, BufRendezvous::Hook* h) { cons_status = s; EXPECT_EQ(h, nullptr); }, &cm_); EXPECT_TRUE(cons_status.ok()); br_.reset(); EXPECT_FALSE(cons_status.ok()); EXPECT_EQ("Delete called on non-empty BufRendezvous", cons_status.message()); } TEST_F(BufRendezvousTest, ConsumeThenCancel) { Status status; Notification note; br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&status, &note](const Status& s, BufRendezvous::Hook* h) { status = s; note.Notify(); }, &cm_); cm_.StartCancel(); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, CancelThenConsume) { Status status; Notification note; cm_.StartCancel(); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&status, &note](const Status& s, BufRendezvous::Hook* h) { status = s; note.Notify(); }, &cm_); note.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(status)); EXPECT_NE( status.message().find(absl::StrCat( "Operation was cancelled for BufRendezvous key ", *kDefaultKey)), string::npos); } TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; Notification note; br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, 
[&note, &prod_status, &prod_callback_called](const Status& s) { prod_status = s; prod_callback_called = true; note.Notify(); }, &cm_); EXPECT_FALSE(prod_callback_called); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [this, &cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; cons_callback_called = true; ASSERT_TRUE(h != nullptr); EXPECT_EQ(h->prod_dev, default_device_); EXPECT_EQ(h->prod_ctx, fake_device_context_); EXPECT_EQ(h->prod_value, &a_); br_->DoneWithHook(h); }, &cm_); note.WaitForNotification(); cm_.StartCancel(); EXPECT_TRUE(cons_callback_called); EXPECT_TRUE(prod_callback_called); TF_EXPECT_OK(cons_status); TF_EXPECT_OK(prod_status); } TEST_F(BufRendezvousTest, CancelThenProvideConsume) { Status prod_status; Status cons_status; bool prod_callback_called = false; bool cons_callback_called = false; cm_.StartCancel(); br_->ProvideBuf( *kDefaultKey, default_device_, fake_device_context_, &a_, aa_, [&prod_status, &prod_callback_called](const Status& s) { prod_status = s; EXPECT_TRUE(errors::IsCancelled(prod_status)); prod_callback_called = true; }, &cm_); EXPECT_TRUE(prod_callback_called); EXPECT_TRUE(errors::IsCancelled(prod_status)); br_->ConsumeBuf( *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation, [&cons_status, &cons_callback_called](const Status& s, BufRendezvous::Hook* h) { cons_status = s; EXPECT_TRUE(errors::IsCancelled(cons_status)); cons_callback_called = true; }, &cm_); EXPECT_TRUE(cons_callback_called); EXPECT_TRUE(errors::IsCancelled(cons_status)); }
#ifndef THIRD_PARTY_CEL_CPP_INTERNAL_NUMBER_H_ #define THIRD_PARTY_CEL_CPP_INTERNAL_NUMBER_H_ #include <cmath> #include <cstdint> #include <limits> #include "absl/types/variant.h" namespace cel::internal { constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max(); constexpr int64_t kInt64Min = std::numeric_limits<int64_t>::lowest(); constexpr uint64_t kUint64Max = std::numeric_limits<uint64_t>::max(); constexpr uint64_t kUintToIntMax = static_cast<uint64_t>(kInt64Max); constexpr double kDoubleToIntMax = static_cast<double>(kInt64Max); constexpr double kDoubleToIntMin = static_cast<double>(kInt64Min); constexpr double kDoubleToUintMax = static_cast<double>(kUint64Max); template <typename T> constexpr int RoundingError() { return 1 << (std::numeric_limits<T>::digits - std::numeric_limits<double>::digits - 1); } constexpr double kMaxDoubleRepresentableAsInt = static_cast<double>(kInt64Max - RoundingError<int64_t>()); constexpr double kMaxDoubleRepresentableAsUint = static_cast<double>(kUint64Max - RoundingError<uint64_t>()); #define CEL_ABSL_VISIT_CONSTEXPR using NumberVariant = absl::variant<double, uint64_t, int64_t>; enum class ComparisonResult { kLesser, kEqual, kGreater, kNanInequal }; constexpr ComparisonResult Invert(ComparisonResult result) { switch (result) { case ComparisonResult::kLesser: return ComparisonResult::kGreater; case ComparisonResult::kGreater: return ComparisonResult::kLesser; case ComparisonResult::kEqual: return ComparisonResult::kEqual; case ComparisonResult::kNanInequal: return ComparisonResult::kNanInequal; } } template <typename OutType> struct ConversionVisitor { template <typename InType> constexpr OutType operator()(InType v) { return static_cast<OutType>(v); } }; template <typename T> constexpr ComparisonResult Compare(T a, T b) { return (a > b) ? ComparisonResult::kGreater : (a == b) ? 
ComparisonResult::kEqual : ComparisonResult::kLesser; } constexpr ComparisonResult DoubleCompare(double a, double b) { if (!(a == a) || !(b == b)) { return ComparisonResult::kNanInequal; } return Compare(a, b); } struct DoubleCompareVisitor { constexpr explicit DoubleCompareVisitor(double v) : v(v) {} constexpr ComparisonResult operator()(double other) const { return DoubleCompare(v, other); } constexpr ComparisonResult operator()(uint64_t other) const { if (v > kDoubleToUintMax) { return ComparisonResult::kGreater; } else if (v < 0) { return ComparisonResult::kLesser; } else { return DoubleCompare(v, static_cast<double>(other)); } } constexpr ComparisonResult operator()(int64_t other) const { if (v > kDoubleToIntMax) { return ComparisonResult::kGreater; } else if (v < kDoubleToIntMin) { return ComparisonResult::kLesser; } else { return DoubleCompare(v, static_cast<double>(other)); } } double v; }; struct UintCompareVisitor { constexpr explicit UintCompareVisitor(uint64_t v) : v(v) {} constexpr ComparisonResult operator()(double other) const { return Invert(DoubleCompareVisitor(other)(v)); } constexpr ComparisonResult operator()(uint64_t other) const { return Compare(v, other); } constexpr ComparisonResult operator()(int64_t other) const { if (v > kUintToIntMax || other < 0) { return ComparisonResult::kGreater; } else { return Compare(v, static_cast<uint64_t>(other)); } } uint64_t v; }; struct IntCompareVisitor { constexpr explicit IntCompareVisitor(int64_t v) : v(v) {} constexpr ComparisonResult operator()(double other) { return Invert(DoubleCompareVisitor(other)(v)); } constexpr ComparisonResult operator()(uint64_t other) { return Invert(UintCompareVisitor(other)(v)); } constexpr ComparisonResult operator()(int64_t other) { return Compare(v, other); } int64_t v; }; struct CompareVisitor { explicit constexpr CompareVisitor(NumberVariant rhs) : rhs(rhs) {} CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(double v) { return absl::visit(DoubleCompareVisitor(v), rhs); } CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(uint64_t v) { return absl::visit(UintCompareVisitor(v), rhs); } CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(int64_t v) { return absl::visit(IntCompareVisitor(v), rhs); } NumberVariant rhs; }; struct LosslessConvertibleToIntVisitor { constexpr bool operator()(double value) const { return value >= kDoubleToIntMin && value <= kMaxDoubleRepresentableAsInt && value == static_cast<double>(static_cast<int64_t>(value)); } constexpr bool operator()(uint64_t value) const { return value <= kUintToIntMax; } constexpr bool operator()(int64_t value) const { return true; } }; struct LosslessConvertibleToUintVisitor { constexpr bool operator()(double value) const { return value >= 0 && value <= kMaxDoubleRepresentableAsUint && value == static_cast<double>(static_cast<uint64_t>(value)); } constexpr bool operator()(uint64_t value) const { return true; } constexpr bool operator()(int64_t value) const { return value >= 0; } }; class Number { public: static constexpr Number FromInt64(int64_t value) { return Number(value); } static constexpr Number FromUint64(uint64_t value) { return Number(value); } static constexpr Number FromDouble(double value) { return Number(value); } constexpr explicit Number(double double_value) : value_(double_value) {} constexpr explicit Number(int64_t int_value) : value_(int_value) {} constexpr explicit Number(uint64_t uint_value) : value_(uint_value) {} CEL_ABSL_VISIT_CONSTEXPR double AsDouble() const { return 
absl::visit(internal::ConversionVisitor<double>(), value_); } CEL_ABSL_VISIT_CONSTEXPR int64_t AsInt() const { return absl::visit(internal::ConversionVisitor<int64_t>(), value_); } CEL_ABSL_VISIT_CONSTEXPR uint64_t AsUint() const { return absl::visit(internal::ConversionVisitor<uint64_t>(), value_); } CEL_ABSL_VISIT_CONSTEXPR bool LosslessConvertibleToInt() const { return absl::visit(internal::LosslessConvertibleToIntVisitor(), value_); } CEL_ABSL_VISIT_CONSTEXPR bool LosslessConvertibleToUint() const { return absl::visit(internal::LosslessConvertibleToUintVisitor(), value_); } CEL_ABSL_VISIT_CONSTEXPR bool operator<(Number other) const { return Compare(other) == internal::ComparisonResult::kLesser; } CEL_ABSL_VISIT_CONSTEXPR bool operator<=(Number other) const { internal::ComparisonResult cmp = Compare(other); return cmp != internal::ComparisonResult::kGreater && cmp != internal::ComparisonResult::kNanInequal; } CEL_ABSL_VISIT_CONSTEXPR bool operator>(Number other) const { return Compare(other) == internal::ComparisonResult::kGreater; } CEL_ABSL_VISIT_CONSTEXPR bool operator>=(Number other) const { internal::ComparisonResult cmp = Compare(other); return cmp != internal::ComparisonResult::kLesser && cmp != internal::ComparisonResult::kNanInequal; } CEL_ABSL_VISIT_CONSTEXPR bool operator==(Number other) const { return Compare(other) == internal::ComparisonResult::kEqual; } CEL_ABSL_VISIT_CONSTEXPR bool operator!=(Number other) const { return Compare(other) != internal::ComparisonResult::kEqual; } template <typename T, typename Op> T visit(Op&& op) const { return absl::visit(std::forward<Op>(op), value_); } private: internal::NumberVariant value_; CEL_ABSL_VISIT_CONSTEXPR internal::ComparisonResult Compare( Number other) const { return absl::visit(internal::CompareVisitor(other.value_), value_); } }; } #endif
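The RoundingError guard exists because doubles near the int64/uint64 limits are spaced more than 1 apart, so a plain value <= kDoubleToIntMax test can pass for unrepresentable values. A small assert-based demonstration of the collapse and of the round-trip check the convertibility visitors use:

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
  // Near 2^63 adjacent doubles are 1024 apart, so distinct integers
  // collapse to the same double value.
  assert(static_cast<double>(kMax) == static_cast<double>(kMax - 100));
  // The lossless-conversion visitors therefore combine a range check with a
  // round-trip check: only doubles that survive double->int->double
  // unchanged are accepted.
  double ok = 4096.0, bad = 4096.5;
  assert(ok == static_cast<double>(static_cast<int64_t>(ok)));
  assert(bad != static_cast<double>(static_cast<int64_t>(bad)));
  return 0;
}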
#include "internal/number.h" #include <cstdint> #include <limits> #include "internal/testing.h" namespace cel::internal { namespace { constexpr double kNan = std::numeric_limits<double>::quiet_NaN(); constexpr double kInfinity = std::numeric_limits<double>::infinity(); TEST(Number, Basic) { EXPECT_GT(Number(1.1), Number::FromInt64(1)); EXPECT_LT(Number::FromUint64(1), Number(1.1)); EXPECT_EQ(Number(1.1), Number(1.1)); EXPECT_EQ(Number::FromUint64(1), Number::FromUint64(1)); EXPECT_EQ(Number::FromInt64(1), Number::FromUint64(1)); EXPECT_GT(Number::FromUint64(1), Number::FromInt64(-1)); EXPECT_EQ(Number::FromInt64(-1), Number::FromInt64(-1)); } TEST(Number, Conversions) { EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToInt()); EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToUint()); EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToUint()); EXPECT_TRUE(Number::FromDouble(-1.0).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(-1.0).LosslessConvertibleToUint()); EXPECT_TRUE(Number::FromDouble(kDoubleToIntMin).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsUint + RoundingError<uint64_t>()) .LosslessConvertibleToUint()); EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsInt + RoundingError<int64_t>()) .LosslessConvertibleToInt()); EXPECT_FALSE( Number::FromDouble(kDoubleToIntMin - 1025).LosslessConvertibleToInt()); EXPECT_EQ(Number::FromInt64(1).AsUint(), 1u); EXPECT_EQ(Number::FromUint64(1).AsInt(), 1); EXPECT_EQ(Number::FromDouble(1.0).AsUint(), 1); EXPECT_EQ(Number::FromDouble(1.0).AsInt(), 1); } } }
class Number { public: static constexpr Number FromInt64(int64_t value) { return Number(value); }
TEST(Number, Basic) { EXPECT_GT(Number(1.1), Number::FromInt64(1)); EXPECT_LT(Number::FromUint64(1), Number(1.1)); EXPECT_EQ(Number(1.1), Number(1.1)); EXPECT_EQ(Number::FromUint64(1), Number::FromUint64(1)); EXPECT_EQ(Number::FromInt64(1), Number::FromUint64(1)); EXPECT_GT(Number::FromUint64(1), Number::FromInt64(-1)); EXPECT_EQ(Number::FromInt64(-1), Number::FromInt64(-1)); } TEST(Number, Conversions) { EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToInt()); EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToUint()); EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToUint()); EXPECT_TRUE(Number::FromDouble(-1.0).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(-1.0).LosslessConvertibleToUint()); EXPECT_TRUE(Number::FromDouble(kDoubleToIntMin).LosslessConvertibleToInt()); EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsUint + RoundingError<uint64_t>()) .LosslessConvertibleToUint()); EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsInt + RoundingError<int64_t>()) .LosslessConvertibleToInt()); EXPECT_FALSE( Number::FromDouble(kDoubleToIntMin - 1025).LosslessConvertibleToInt()); EXPECT_EQ(Number::FromInt64(1).AsUint(), 1u); EXPECT_EQ(Number::FromUint64(1).AsInt(), 1); EXPECT_EQ(Number::FromDouble(1.0).AsUint(), 1); EXPECT_EQ(Number::FromDouble(1.0).AsInt(), 1); }
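The boundary constants exercised above (kDoubleToIntMin, kMaxDoubleRepresentableAsInt, RoundingError<T>()) exist because a double cannot represent every int64_t exactly. A standalone sketch, assuming nothing beyond standard C++, of the precision loss at the top of the int64 range:

#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  const int64_t max = std::numeric_limits<int64_t>::max();  // 2^63 - 1
  const double d = static_cast<double>(max);
  // The nearest double is 2^63 itself: the cast rounded *up*, past the
  // int64 range, which is why a naive cast-and-compare round trip would
  // invoke undefined behavior on the way back.
  std::cout << std::boolalpha << (d == std::ldexp(1.0, 63)) << "\n";  // true
}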
#include "quiche/quic/core/http/quic_spdy_stream_body_manager.h" #include <algorithm> #include "absl/strings/string_view.h" #include "quiche/quic/platform/api/quic_logging.h" namespace quic { QuicSpdyStreamBodyManager::QuicSpdyStreamBodyManager() : total_body_bytes_received_(0) {} size_t QuicSpdyStreamBodyManager::OnNonBody(QuicByteCount length) { QUICHE_DCHECK_NE(0u, length); if (fragments_.empty()) { return length; } fragments_.back().trailing_non_body_byte_count += length; return 0; } void QuicSpdyStreamBodyManager::OnBody(absl::string_view body) { QUICHE_DCHECK(!body.empty()); fragments_.push_back({body, 0}); total_body_bytes_received_ += body.length(); } size_t QuicSpdyStreamBodyManager::OnBodyConsumed(size_t num_bytes) { QuicByteCount bytes_to_consume = 0; size_t remaining_bytes = num_bytes; while (remaining_bytes > 0) { if (fragments_.empty()) { QUIC_BUG(quic_bug_10394_1) << "Not enough available body to consume."; return 0; } Fragment& fragment = fragments_.front(); const absl::string_view body = fragment.body; if (body.length() > remaining_bytes) { bytes_to_consume += remaining_bytes; fragment.body = body.substr(remaining_bytes); return bytes_to_consume; } remaining_bytes -= body.length(); bytes_to_consume += body.length() + fragment.trailing_non_body_byte_count; fragments_.pop_front(); } return bytes_to_consume; } int QuicSpdyStreamBodyManager::PeekBody(iovec* iov, size_t iov_len) const { QUICHE_DCHECK(iov); QUICHE_DCHECK_GT(iov_len, 0u); if (fragments_.empty()) { iov[0].iov_base = nullptr; iov[0].iov_len = 0; return 0; } size_t iov_filled = 0; while (iov_filled < fragments_.size() && iov_filled < iov_len) { absl::string_view body = fragments_[iov_filled].body; iov[iov_filled].iov_base = const_cast<char*>(body.data()); iov[iov_filled].iov_len = body.size(); iov_filled++; } return iov_filled; } size_t QuicSpdyStreamBodyManager::ReadableBytes() const { size_t count = 0; for (auto const& fragment : fragments_) { count += fragment.body.length(); } return count; } size_t QuicSpdyStreamBodyManager::ReadBody(const struct iovec* iov, size_t iov_len, size_t* total_bytes_read) { *total_bytes_read = 0; QuicByteCount bytes_to_consume = 0; size_t index = 0; char* dest = reinterpret_cast<char*>(iov[index].iov_base); size_t dest_remaining = iov[index].iov_len; while (!fragments_.empty()) { Fragment& fragment = fragments_.front(); const absl::string_view body = fragment.body; const size_t bytes_to_copy = std::min<size_t>(body.length(), dest_remaining); if (bytes_to_copy > 0) { memcpy(dest, body.data(), bytes_to_copy); } bytes_to_consume += bytes_to_copy; *total_bytes_read += bytes_to_copy; if (bytes_to_copy == body.length()) { bytes_to_consume += fragment.trailing_non_body_byte_count; fragments_.pop_front(); } else { fragment.body = body.substr(bytes_to_copy); } if (bytes_to_copy == dest_remaining) { ++index; if (index == iov_len) { break; } dest = reinterpret_cast<char*>(iov[index].iov_base); dest_remaining = iov[index].iov_len; } else { dest += bytes_to_copy; dest_remaining -= bytes_to_copy; } } return bytes_to_consume; } }
#include "quiche/quic/core/http/quic_spdy_stream_body_manager.h" #include <algorithm> #include <numeric> #include <string> #include <vector> #include "absl/base/macros.h" #include "absl/strings/string_view.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace test { namespace { class QuicSpdyStreamBodyManagerTest : public QuicTest { protected: QuicSpdyStreamBodyManager body_manager_; }; TEST_F(QuicSpdyStreamBodyManagerTest, HasBytesToRead) { EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); EXPECT_EQ(0u, body_manager_.total_body_bytes_received()); const QuicByteCount header_length = 3; EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length)); EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); EXPECT_EQ(0u, body_manager_.total_body_bytes_received()); std::string body(1024, 'a'); body_manager_.OnBody(body); EXPECT_TRUE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 1024u); EXPECT_EQ(1024u, body_manager_.total_body_bytes_received()); } TEST_F(QuicSpdyStreamBodyManagerTest, ConsumeMoreThanAvailable) { std::string body(1024, 'a'); body_manager_.OnBody(body); size_t bytes_to_consume = 0; EXPECT_QUIC_BUG(bytes_to_consume = body_manager_.OnBodyConsumed(2048), "Not enough available body to consume."); EXPECT_EQ(0u, bytes_to_consume); } TEST_F(QuicSpdyStreamBodyManagerTest, OnBodyConsumed) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; std::vector<QuicByteCount> body_bytes_to_read; std::vector<QuicByteCount> expected_return_values; } const kOnBodyConsumedTestData[] = { {{2}, {"foobar"}, {6}, {6}}, {{3, 5}, {"foobar", "baz"}, {9}, {14}}, {{2}, {"foobar"}, {4, 2}, {4, 2}}, {{3, 5}, {"foobar", "baz"}, {6, 3}, {11, 3}}, {{3, 5}, {"foobar", "baz"}, {5, 4}, {5, 9}}, {{3, 5}, {"foobar", "baz"}, {7, 2}, {12, 2}}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kOnBodyConsumedTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kOnBodyConsumedTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& frame_payloads = kOnBodyConsumedTestData[test_case_index].frame_payloads; const std::vector<QuicByteCount>& body_bytes_to_read = kOnBodyConsumedTestData[test_case_index].body_bytes_to_read; const std::vector<QuicByteCount>& expected_return_values = kOnBodyConsumedTestData[test_case_index].expected_return_values; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? 
frame_header_lengths[frame_index] : 0u, body_manager_.OnNonBody(frame_header_lengths[frame_index])); body_manager_.OnBody(frame_payloads[frame_index]); } for (size_t call_index = 0; call_index < body_bytes_to_read.size(); ++call_index) { EXPECT_EQ(expected_return_values[call_index], body_manager_.OnBodyConsumed(body_bytes_to_read[call_index])); } EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); } } TEST_F(QuicSpdyStreamBodyManagerTest, PeekBody) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; size_t iov_len; } const kPeekBodyTestData[] = { {{}, {}, 1}, {{3}, {"foobar"}, 1}, {{3}, {"foobar"}, 2}, {{3, 5}, {"foobar", "baz"}, 1}, {{3, 5}, {"foobar", "baz"}, 2}, {{3, 5}, {"foobar", "baz"}, 3}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kPeekBodyTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kPeekBodyTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& frame_payloads = kPeekBodyTestData[test_case_index].frame_payloads; size_t iov_len = kPeekBodyTestData[test_case_index].iov_len; QuicSpdyStreamBodyManager body_manager; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u, body_manager.OnNonBody(frame_header_lengths[frame_index])); body_manager.OnBody(frame_payloads[frame_index]); } std::vector<iovec> iovecs; iovecs.resize(iov_len); size_t iovs_filled = std::min(frame_payloads.size(), iov_len); ASSERT_EQ(iovs_filled, static_cast<size_t>(body_manager.PeekBody(&iovecs[0], iov_len))); for (size_t iovec_index = 0; iovec_index < iovs_filled; ++iovec_index) { EXPECT_EQ(frame_payloads[iovec_index], absl::string_view( static_cast<const char*>(iovecs[iovec_index].iov_base), iovecs[iovec_index].iov_len)); } } } TEST_F(QuicSpdyStreamBodyManagerTest, ReadBody) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; std::vector<std::vector<QuicByteCount>> iov_lengths; std::vector<QuicByteCount> expected_total_bytes_read; std::vector<QuicByteCount> expected_return_values; } const kReadBodyTestData[] = { {{4}, {"foo"}, {{2}}, {2}, {2}}, {{4}, {"foo"}, {{3}}, {3}, {3}}, {{4}, {"foo"}, {{5}}, {3}, {3}}, {{4}, {"foobar"}, {{2, 3}}, {5}, {5}}, {{4}, {"foobar"}, {{2, 4}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 6}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 4, 4, 3}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 7, 4, 3}}, {6}, {6}}, {{4}, {"foobarbaz"}, {{2, 1}, {3, 2}}, {3, 5}, {3, 5}}, {{4}, {"foobarbaz"}, {{2, 1}, {4, 2}}, {3, 6}, {3, 6}}, {{4}, {"foobarbaz"}, {{2, 1}, {4, 10}}, {3, 6}, {3, 6}}, {{4, 3}, {"foobar", "baz"}, {{8}}, {8}, {11}}, {{4, 3}, {"foobar", "baz"}, {{9}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{10}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 3}}, {7}, {10}}, {{4, 3}, {"foobar", "baz"}, {{4, 5}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 6}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 6, 4, 3}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 7, 4, 3}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{2, 4}, {2, 1}}, {6, 3}, {9, 3}}, {{4, 3, 6}, {"foobar", "bazquux", "qux"}, {{4, 3}, {2, 3}, {5, 3}}, {7, 5, 4}, {10, 5, 10}}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kReadBodyTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kReadBodyTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& 
frame_payloads = kReadBodyTestData[test_case_index].frame_payloads; const std::vector<std::vector<QuicByteCount>>& iov_lengths = kReadBodyTestData[test_case_index].iov_lengths; const std::vector<QuicByteCount>& expected_total_bytes_read = kReadBodyTestData[test_case_index].expected_total_bytes_read; const std::vector<QuicByteCount>& expected_return_values = kReadBodyTestData[test_case_index].expected_return_values; QuicSpdyStreamBodyManager body_manager; std::string received_body; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u, body_manager.OnNonBody(frame_header_lengths[frame_index])); body_manager.OnBody(frame_payloads[frame_index]); received_body.append(frame_payloads[frame_index]); } std::string read_body; for (size_t call_index = 0; call_index < iov_lengths.size(); ++call_index) { size_t total_iov_length = std::accumulate(iov_lengths[call_index].begin(), iov_lengths[call_index].end(), static_cast<size_t>(0)); std::string buffer(total_iov_length, 'z'); std::vector<iovec> iovecs; size_t offset = 0; for (size_t iov_length : iov_lengths[call_index]) { QUICHE_CHECK(offset + iov_length <= buffer.size()); iovecs.push_back({&buffer[offset], iov_length}); offset += iov_length; } size_t total_bytes_read = 0; EXPECT_EQ( expected_return_values[call_index], body_manager.ReadBody(&iovecs[0], iovecs.size(), &total_bytes_read)); EXPECT_EQ(expected_total_bytes_read[call_index], total_bytes_read); read_body.append(buffer.substr(0, total_bytes_read)); } EXPECT_EQ(received_body.substr(0, read_body.size()), read_body); EXPECT_EQ(read_body.size() < received_body.size(), body_manager.HasBytesToRead()); } } TEST_F(QuicSpdyStreamBodyManagerTest, Clear) { const QuicByteCount header_length = 3; EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length)); std::string body("foo"); body_manager_.OnBody(body); EXPECT_TRUE(body_manager_.HasBytesToRead()); body_manager_.Clear(); EXPECT_FALSE(body_manager_.HasBytesToRead()); iovec iov; size_t total_bytes_read = 5; EXPECT_EQ(0, body_manager_.PeekBody(&iov, 1)); EXPECT_EQ(0u, body_manager_.ReadBody(&iov, 1, &total_bytes_read)); } } } }
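A sketch of PeekBody's zero-copy contract (same hypothetical-driver caveats as above; <sys/uio.h> is assumed for iovec): each filled entry aliases one buffered fragment directly, and peeking consumes nothing.

#include <sys/uio.h>

#include <iostream>

#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/quic_spdy_stream_body_manager.h"

int main() {
  quic::QuicSpdyStreamBodyManager manager;
  manager.OnBody("foo");
  manager.OnBody("barbaz");
  iovec iovs[4];
  const int filled = manager.PeekBody(iovs, 4);  // one entry per fragment
  for (int i = 0; i < filled; ++i) {
    std::cout << absl::string_view(static_cast<const char*>(iovs[i].iov_base),
                                   iovs[i].iov_len)
              << "\n";                           // "foo", then "barbaz"
  }
  std::cout << manager.ReadableBytes() << "\n";  // 9: nothing was consumed
}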
#include "quiche/quic/core/http/quic_spdy_stream_body_manager.h" #include <algorithm> #include "absl/strings/string_view.h" #include "quiche/quic/platform/api/quic_logging.h" namespace quic { QuicSpdyStreamBodyManager::QuicSpdyStreamBodyManager() : total_body_bytes_received_(0) {}
TEST_F(QuicSpdyStreamBodyManagerTest, HasBytesToRead) { EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); EXPECT_EQ(0u, body_manager_.total_body_bytes_received()); const QuicByteCount header_length = 3; EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length)); EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); EXPECT_EQ(0u, body_manager_.total_body_bytes_received()); std::string body(1024, 'a'); body_manager_.OnBody(body); EXPECT_TRUE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 1024u); EXPECT_EQ(1024u, body_manager_.total_body_bytes_received()); } TEST_F(QuicSpdyStreamBodyManagerTest, ConsumeMoreThanAvailable) { std::string body(1024, 'a'); body_manager_.OnBody(body); size_t bytes_to_consume = 0; EXPECT_QUIC_BUG(bytes_to_consume = body_manager_.OnBodyConsumed(2048), "Not enough available body to consume."); EXPECT_EQ(0u, bytes_to_consume); } TEST_F(QuicSpdyStreamBodyManagerTest, OnBodyConsumed) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; std::vector<QuicByteCount> body_bytes_to_read; std::vector<QuicByteCount> expected_return_values; } const kOnBodyConsumedTestData[] = { {{2}, {"foobar"}, {6}, {6}}, {{3, 5}, {"foobar", "baz"}, {9}, {14}}, {{2}, {"foobar"}, {4, 2}, {4, 2}}, {{3, 5}, {"foobar", "baz"}, {6, 3}, {11, 3}}, {{3, 5}, {"foobar", "baz"}, {5, 4}, {5, 9}}, {{3, 5}, {"foobar", "baz"}, {7, 2}, {12, 2}}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kOnBodyConsumedTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kOnBodyConsumedTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& frame_payloads = kOnBodyConsumedTestData[test_case_index].frame_payloads; const std::vector<QuicByteCount>& body_bytes_to_read = kOnBodyConsumedTestData[test_case_index].body_bytes_to_read; const std::vector<QuicByteCount>& expected_return_values = kOnBodyConsumedTestData[test_case_index].expected_return_values; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u, body_manager_.OnNonBody(frame_header_lengths[frame_index])); body_manager_.OnBody(frame_payloads[frame_index]); } for (size_t call_index = 0; call_index < body_bytes_to_read.size(); ++call_index) { EXPECT_EQ(expected_return_values[call_index], body_manager_.OnBodyConsumed(body_bytes_to_read[call_index])); } EXPECT_FALSE(body_manager_.HasBytesToRead()); EXPECT_EQ(body_manager_.ReadableBytes(), 0u); } } TEST_F(QuicSpdyStreamBodyManagerTest, PeekBody) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; size_t iov_len; } const kPeekBodyTestData[] = { {{}, {}, 1}, {{3}, {"foobar"}, 1}, {{3}, {"foobar"}, 2}, {{3, 5}, {"foobar", "baz"}, 1}, {{3, 5}, {"foobar", "baz"}, 2}, {{3, 5}, {"foobar", "baz"}, 3}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kPeekBodyTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kPeekBodyTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& frame_payloads = kPeekBodyTestData[test_case_index].frame_payloads; size_t iov_len = kPeekBodyTestData[test_case_index].iov_len; QuicSpdyStreamBodyManager body_manager; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? 
frame_header_lengths[frame_index] : 0u, body_manager.OnNonBody(frame_header_lengths[frame_index])); body_manager.OnBody(frame_payloads[frame_index]); } std::vector<iovec> iovecs; iovecs.resize(iov_len); size_t iovs_filled = std::min(frame_payloads.size(), iov_len); ASSERT_EQ(iovs_filled, static_cast<size_t>(body_manager.PeekBody(&iovecs[0], iov_len))); for (size_t iovec_index = 0; iovec_index < iovs_filled; ++iovec_index) { EXPECT_EQ(frame_payloads[iovec_index], absl::string_view( static_cast<const char*>(iovecs[iovec_index].iov_base), iovecs[iovec_index].iov_len)); } } } TEST_F(QuicSpdyStreamBodyManagerTest, ReadBody) { struct { std::vector<QuicByteCount> frame_header_lengths; std::vector<const char*> frame_payloads; std::vector<std::vector<QuicByteCount>> iov_lengths; std::vector<QuicByteCount> expected_total_bytes_read; std::vector<QuicByteCount> expected_return_values; } const kReadBodyTestData[] = { {{4}, {"foo"}, {{2}}, {2}, {2}}, {{4}, {"foo"}, {{3}}, {3}, {3}}, {{4}, {"foo"}, {{5}}, {3}, {3}}, {{4}, {"foobar"}, {{2, 3}}, {5}, {5}}, {{4}, {"foobar"}, {{2, 4}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 6}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 4, 4, 3}}, {6}, {6}}, {{4}, {"foobar"}, {{2, 7, 4, 3}}, {6}, {6}}, {{4}, {"foobarbaz"}, {{2, 1}, {3, 2}}, {3, 5}, {3, 5}}, {{4}, {"foobarbaz"}, {{2, 1}, {4, 2}}, {3, 6}, {3, 6}}, {{4}, {"foobarbaz"}, {{2, 1}, {4, 10}}, {3, 6}, {3, 6}}, {{4, 3}, {"foobar", "baz"}, {{8}}, {8}, {11}}, {{4, 3}, {"foobar", "baz"}, {{9}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{10}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 3}}, {7}, {10}}, {{4, 3}, {"foobar", "baz"}, {{4, 5}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 6}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 6, 4, 3}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{4, 7, 4, 3}}, {9}, {12}}, {{4, 3}, {"foobar", "baz"}, {{2, 4}, {2, 1}}, {6, 3}, {9, 3}}, {{4, 3, 6}, {"foobar", "bazquux", "qux"}, {{4, 3}, {2, 3}, {5, 3}}, {7, 5, 4}, {10, 5, 10}}, }; for (size_t test_case_index = 0; test_case_index < ABSL_ARRAYSIZE(kReadBodyTestData); ++test_case_index) { const std::vector<QuicByteCount>& frame_header_lengths = kReadBodyTestData[test_case_index].frame_header_lengths; const std::vector<const char*>& frame_payloads = kReadBodyTestData[test_case_index].frame_payloads; const std::vector<std::vector<QuicByteCount>>& iov_lengths = kReadBodyTestData[test_case_index].iov_lengths; const std::vector<QuicByteCount>& expected_total_bytes_read = kReadBodyTestData[test_case_index].expected_total_bytes_read; const std::vector<QuicByteCount>& expected_return_values = kReadBodyTestData[test_case_index].expected_return_values; QuicSpdyStreamBodyManager body_manager; std::string received_body; for (size_t frame_index = 0; frame_index < frame_header_lengths.size(); ++frame_index) { EXPECT_EQ(frame_index == 0 ? 
frame_header_lengths[frame_index] : 0u, body_manager.OnNonBody(frame_header_lengths[frame_index])); body_manager.OnBody(frame_payloads[frame_index]); received_body.append(frame_payloads[frame_index]); } std::string read_body; for (size_t call_index = 0; call_index < iov_lengths.size(); ++call_index) { size_t total_iov_length = std::accumulate(iov_lengths[call_index].begin(), iov_lengths[call_index].end(), static_cast<size_t>(0)); std::string buffer(total_iov_length, 'z'); std::vector<iovec> iovecs; size_t offset = 0; for (size_t iov_length : iov_lengths[call_index]) { QUICHE_CHECK(offset + iov_length <= buffer.size()); iovecs.push_back({&buffer[offset], iov_length}); offset += iov_length; } size_t total_bytes_read = 0; EXPECT_EQ( expected_return_values[call_index], body_manager.ReadBody(&iovecs[0], iovecs.size(), &total_bytes_read)); EXPECT_EQ(expected_total_bytes_read[call_index], total_bytes_read); read_body.append(buffer.substr(0, total_bytes_read)); } EXPECT_EQ(received_body.substr(0, read_body.size()), read_body); EXPECT_EQ(read_body.size() < received_body.size(), body_manager.HasBytesToRead()); } } TEST_F(QuicSpdyStreamBodyManagerTest, Clear) { const QuicByteCount header_length = 3; EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length)); std::string body("foo"); body_manager_.OnBody(body); EXPECT_TRUE(body_manager_.HasBytesToRead()); body_manager_.Clear(); EXPECT_FALSE(body_manager_.HasBytesToRead()); iovec iov; size_t total_bytes_read = 5; EXPECT_EQ(0, body_manager_.PeekBody(&iov, 1)); EXPECT_EQ(0u, body_manager_.ReadBody(&iov, 1, &total_bytes_read)); }
#include "arolla/expr/optimization/peephole_optimizations/short_circuit_where.h" #include <functional> #include <memory> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/operators/type_meta_eval_strategies.h" #include "arolla/expr/optimization/peephole_optimizer.h" #include "arolla/memory/optional_value.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr { namespace { using ::arolla::expr_operators::type_meta::Is; std::function<bool(const ExprNodePtr&)> TypeMatches( expr_operators::type_meta::Strategy strategy) { return [strategy = std::move(strategy)](const ExprNodePtr& node) { return node->qtype() != nullptr && strategy({node->qtype()}).ok(); }; } absl::Status AddCoreWhereOptimizations( PeepholeOptimizationPack& optimizations) { ExprNodePtr cond = Placeholder("cond"); ExprNodePtr x = Placeholder("x"); ExprNodePtr y = Placeholder("y"); { ASSIGN_OR_RETURN(ExprNodePtr from, CallOpReference("core.where", {cond, x, y})); ASSIGN_OR_RETURN( ExprNodePtr to, CallOpReference("core._short_circuit_where", {cond, x, y})); ASSIGN_OR_RETURN(optimizations.emplace_back(), PeepholeOptimization::CreatePatternOptimization( from, to, {{"cond", TypeMatches(Is<OptionalUnit>)}})); } { ExprNodePtr shape = Placeholder("shape"); ASSIGN_OR_RETURN( ExprNodePtr from, CallOpReference("core.where", {CallOpReference("core.const_with_shape._array_shape", {shape, cond}), x, y})); ASSIGN_OR_RETURN( ExprNodePtr to, CallOpReference("core._short_circuit_where", {cond, x, y})); ASSIGN_OR_RETURN(optimizations.emplace_back(), PeepholeOptimization::CreatePatternOptimization( from, to, {{"cond", TypeMatches(Is<OptionalUnit>)}})); } return absl::OkStatus(); } absl::StatusOr<std::unique_ptr<PeepholeOptimization>> AlwaysTrueConditionOptimization() { ASSIGN_OR_RETURN( ExprNodePtr short_circuit_where, CallOpReference("core._short_circuit_where", {Literal(kPresent), Placeholder("x"), Placeholder("y")})); ExprNodePtr x = Placeholder("x"); return PeepholeOptimization::CreatePatternOptimization(short_circuit_where, x); } absl::StatusOr<std::unique_ptr<PeepholeOptimization>> AlwaysFalseConditionOptimization() { ASSIGN_OR_RETURN( ExprNodePtr short_circuit_where, CallOpReference("core._short_circuit_where", {Literal(kMissing), Placeholder("x"), Placeholder("y")})); ExprNodePtr y = Placeholder("y"); return PeepholeOptimization::CreatePatternOptimization(short_circuit_where, y); } } absl::StatusOr<PeepholeOptimizationPack> ShortCircuitWhereOptimizations() { std::vector<std::unique_ptr<PeepholeOptimization>> optimizations; RETURN_IF_ERROR(AddCoreWhereOptimizations(optimizations)); ASSIGN_OR_RETURN(optimizations.emplace_back(), AlwaysTrueConditionOptimization()); ASSIGN_OR_RETURN(optimizations.emplace_back(), AlwaysFalseConditionOptimization()); return optimizations; } }
#include "arolla/expr/optimization/peephole_optimizations/short_circuit_where.h" #include <cstdint> #include <memory> #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/statusor.h" #include "arolla/dense_array/dense_array.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_visitor.h" #include "arolla/expr/optimization/peephole_optimizations/bool.h" #include "arolla/expr/optimization/peephole_optimizations/const_with_shape.h" #include "arolla/expr/optimization/peephole_optimizer.h" #include "arolla/expr/testing/testing.h" #include "arolla/memory/optional_value.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/util/init_arolla.h" #include "arolla/util/testing/status_matchers_backport.h" #include "arolla/util/unit.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr { namespace { using ::arolla::testing::EqualsExpr; using ::arolla::testing::IsOkAndHolds; using ::arolla::testing::WithQTypeAnnotation; class ShortCircuitWhereOptimizationsTest : public ::testing::Test { protected: void SetUp() override { ASSERT_OK(InitArolla()); ASSERT_OK_AND_ASSIGN( optimizer_, CreatePeepholeOptimizer({ShortCircuitWhereOptimizations})); } absl::StatusOr<ExprNodePtr> ApplyOptimizer( absl::StatusOr<ExprNodePtr> status_or_expr) const { ASSIGN_OR_RETURN(auto expr, ToLowest(status_or_expr)); return ToLowest(optimizer_->ApplyToNode(expr)); } absl::StatusOr<ExprNodePtr> ToLowest( const absl::StatusOr<ExprNodePtr>& status_or_expr) const { if (!status_or_expr.ok()) { return std::move(status_or_expr).status(); } return ::arolla::expr::ToLowest(*status_or_expr); } std::unique_ptr<PeepholeOptimizer> optimizer_; }; TEST_F(ShortCircuitWhereOptimizationsTest, ScalarCondition) { ASSERT_OK_AND_ASSIGN( auto shape, WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>())); ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto scalar_cond, WithQTypeAnnotation(Leaf("cond"), GetQType<OptionalUnit>())); { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core.where", {scalar_cond, x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(CallOp("core._short_circuit_where", {scalar_cond, x_plus_y, x_mul_y}))); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core.where", {CallOp("core.const_with_shape", {shape, scalar_cond}), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(CallOp("core._short_circuit_where", {scalar_cond, x_plus_y, x_mul_y}))); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } } TEST_F(ShortCircuitWhereOptimizationsTest, ArrayCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto array_cond, WithQTypeAnnotation(Leaf("cond"), GetDenseArrayQType<Unit>())); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.where", {array_cond, x_plus_y, x_mul_y})); 
ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr)); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } TEST_F(ShortCircuitWhereOptimizationsTest, UntypedCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); auto untyped_cond = Leaf("cond"); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.where", {untyped_cond, x_plus_y, x_mul_y})); ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr)); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } TEST_F(ShortCircuitWhereOptimizationsTest, ConstScalarCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core._short_circuit_where", {Literal(kPresent), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(x_plus_y)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core._short_circuit_where", {Literal(kMissing), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(x_mul_y)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } } TEST_F(ShortCircuitWhereOptimizationsTest, EndToEndWithBroadcastedCondition) { ASSERT_OK_AND_ASSIGN(auto peephole_optimizer, CreatePeepholeOptimizer({ShortCircuitWhereOptimizations, BoolOptimizations, ConstWithShapeOptimizations})); auto optimize = [&](const ExprNodePtr& node) -> absl::StatusOr<ExprNodePtr> { ASSIGN_OR_RETURN(auto new_node, ToLowerNode(node)); if (new_node->fingerprint() != node->fingerprint()) { return new_node; } return peephole_optimizer->ApplyToNode(node); }; ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<int32_t>())); auto true_case = WithQTypeAnnotation(Leaf("true_case"), GetQType<float>()); auto false_or_missing_case = WithQTypeAnnotation(Leaf("false_or_missing_case"), GetQType<float>()); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto shape, CallOp("core.shape_of", {CallOp("core.has", {true_case})})); ASSERT_OK_AND_ASSIGN( auto relative_shape, CallOp("core.shape_of", {CallOp("core.has", {CallOp("core.const_with_shape", {shape, true_case})})})); ASSERT_OK_AND_ASSIGN( auto broadcasted_condition, CallOp( "bool.logical_or", {CallOp( "bool.equal", {CallOp("core.const_with_shape", {relative_shape, x_plus_y}), CallOp("core.const_with_shape", {relative_shape, Literal(1)})}), CallOp("bool.equal", {CallOp("core.const_with_shape", {relative_shape, x_mul_y}), CallOp("core.const_with_shape", {relative_shape, Literal(1)})})})); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("bool.logical_if", {broadcasted_condition, true_case, false_or_missing_case, false_or_missing_case})); ASSERT_OK_AND_ASSIGN( auto unbroadcasted_condition, CallOp("bool.logical_or", {CallOp("bool.equal", {x_plus_y, Literal(1)}), 
CallOp("bool.equal", {x_mul_y, Literal(1)})})); EXPECT_THAT(DeepTransform(relative_shape, optimize), IsOkAndHolds(EqualsExpr(ToLowest(shape)))); EXPECT_THAT( DeepTransform(broadcasted_condition, optimize), IsOkAndHolds(EqualsExpr(ToLowest( CallOp("core.const_with_shape", {shape, unbroadcasted_condition}))))); auto optimize_and_broadcast = [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { ASSIGN_OR_RETURN(node, optimize(std::move(node))); auto true_literal = Literal(true); if (node->is_op() && node->op()->display_name() == "core.equal" && node->node_deps()[1]->fingerprint() == true_literal->fingerprint()) { return CallOp("core.equal", {node->node_deps()[0], CallOp("core.const_with_shape", {shape, true_literal})}); } return node; }; EXPECT_THAT(DeepTransform(expr, optimize_and_broadcast), IsOkAndHolds(EqualsExpr(ToLowest( CallOp("core._short_circuit_where", {CallOp("core.presence_or", {CallOp("core.equal", {x_plus_y, Literal(1)}), CallOp("core.equal", {x_mul_y, Literal(1)})}), true_case, false_or_missing_case}))))); } } }
absl::Status AddCoreWhereOptimizations( PeepholeOptimizationPack& optimizations) { ExprNodePtr cond = Placeholder("cond"); ExprNodePtr x = Placeholder("x"); ExprNodePtr y = Placeholder("y"); { ASSIGN_OR_RETURN(ExprNodePtr from, CallOpReference("core.where", {cond, x, y})); ASSIGN_OR_RETURN( ExprNodePtr to, CallOpReference("core._short_circuit_where", {cond, x, y})); ASSIGN_OR_RETURN(optimizations.emplace_back(), PeepholeOptimization::CreatePatternOptimization( from, to, {{"cond", TypeMatches(Is<OptionalUnit>)}})); } { ExprNodePtr shape = Placeholder("shape"); ASSIGN_OR_RETURN( ExprNodePtr from, CallOpReference("core.where", {CallOpReference("core.const_with_shape._array_shape", {shape, cond}), x, y})); ASSIGN_OR_RETURN( ExprNodePtr to, CallOpReference("core._short_circuit_where", {cond, x, y})); ASSIGN_OR_RETURN(optimizations.emplace_back(), PeepholeOptimization::CreatePatternOptimization( from, to, {{"cond", TypeMatches(Is<OptionalUnit>)}})); } return absl::OkStatus(); }
TEST_F(ShortCircuitWhereOptimizationsTest, ScalarCondition) { ASSERT_OK_AND_ASSIGN( auto shape, WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>())); ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto scalar_cond, WithQTypeAnnotation(Leaf("cond"), GetQType<OptionalUnit>())); { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core.where", {scalar_cond, x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(CallOp("core._short_circuit_where", {scalar_cond, x_plus_y, x_mul_y}))); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core.where", {CallOp("core.const_with_shape", {shape, scalar_cond}), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(CallOp("core._short_circuit_where", {scalar_cond, x_plus_y, x_mul_y}))); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } } TEST_F(ShortCircuitWhereOptimizationsTest, ArrayCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto array_cond, WithQTypeAnnotation(Leaf("cond"), GetDenseArrayQType<Unit>())); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.where", {array_cond, x_plus_y, x_mul_y})); ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr)); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } TEST_F(ShortCircuitWhereOptimizationsTest, UntypedCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); auto untyped_cond = Leaf("cond"); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.where", {untyped_cond, x_plus_y, x_mul_y})); ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr)); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } TEST_F(ShortCircuitWhereOptimizationsTest, ConstScalarCondition) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core._short_circuit_where", {Literal(kPresent), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(x_plus_y)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } { ASSERT_OK_AND_ASSIGN( auto actual_expr, ApplyOptimizer(CallOp("core._short_circuit_where", {Literal(kMissing), x_plus_y, x_mul_y}))); ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(x_mul_y)); EXPECT_THAT(actual_expr, EqualsExpr(expected_expr)); } } TEST_F(ShortCircuitWhereOptimizationsTest, EndToEndWithBroadcastedCondition) { ASSERT_OK_AND_ASSIGN(auto 
peephole_optimizer, CreatePeepholeOptimizer({ShortCircuitWhereOptimizations, BoolOptimizations, ConstWithShapeOptimizations})); auto optimize = [&](const ExprNodePtr& node) -> absl::StatusOr<ExprNodePtr> { ASSIGN_OR_RETURN(auto new_node, ToLowerNode(node)); if (new_node->fingerprint() != node->fingerprint()) { return new_node; } return peephole_optimizer->ApplyToNode(node); }; ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<int32_t>())); auto true_case = WithQTypeAnnotation(Leaf("true_case"), GetQType<float>()); auto false_or_missing_case = WithQTypeAnnotation(Leaf("false_or_missing_case"), GetQType<float>()); ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y})); ASSERT_OK_AND_ASSIGN( auto shape, CallOp("core.shape_of", {CallOp("core.has", {true_case})})); ASSERT_OK_AND_ASSIGN( auto relative_shape, CallOp("core.shape_of", {CallOp("core.has", {CallOp("core.const_with_shape", {shape, true_case})})})); ASSERT_OK_AND_ASSIGN( auto broadcasted_condition, CallOp( "bool.logical_or", {CallOp( "bool.equal", {CallOp("core.const_with_shape", {relative_shape, x_plus_y}), CallOp("core.const_with_shape", {relative_shape, Literal(1)})}), CallOp("bool.equal", {CallOp("core.const_with_shape", {relative_shape, x_mul_y}), CallOp("core.const_with_shape", {relative_shape, Literal(1)})})})); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("bool.logical_if", {broadcasted_condition, true_case, false_or_missing_case, false_or_missing_case})); ASSERT_OK_AND_ASSIGN( auto unbroadcasted_condition, CallOp("bool.logical_or", {CallOp("bool.equal", {x_plus_y, Literal(1)}), CallOp("bool.equal", {x_mul_y, Literal(1)})})); EXPECT_THAT(DeepTransform(relative_shape, optimize), IsOkAndHolds(EqualsExpr(ToLowest(shape)))); EXPECT_THAT( DeepTransform(broadcasted_condition, optimize), IsOkAndHolds(EqualsExpr(ToLowest( CallOp("core.const_with_shape", {shape, unbroadcasted_condition}))))); auto optimize_and_broadcast = [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { ASSIGN_OR_RETURN(node, optimize(std::move(node))); auto true_literal = Literal(true); if (node->is_op() && node->op()->display_name() == "core.equal" && node->node_deps()[1]->fingerprint() == true_literal->fingerprint()) { return CallOp("core.equal", {node->node_deps()[0], CallOp("core.const_with_shape", {shape, true_literal})}); } return node; }; EXPECT_THAT(DeepTransform(expr, optimize_and_broadcast), IsOkAndHolds(EqualsExpr(ToLowest( CallOp("core._short_circuit_where", {CallOp("core.presence_or", {CallOp("core.equal", {x_plus_y, Literal(1)}), CallOp("core.equal", {x_mul_y, Literal(1)})}), true_case, false_or_missing_case}))))); }
#include "tensorflow/core/util/example_proto_fast_parsing.h" #include <algorithm> #include <functional> #include <optional> #include <utility> #include <vector> #include "absl/base/casts.h" #include "absl/container/flat_hash_map.h" #include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/feature.pb.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/byte_order.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/util/presized_cuckoo_map.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { namespace example { namespace { template <typename T> using SmallVector = gtl::InlinedVector<T, 4>; template <typename T> class LimitedArraySlice { public: using value_type = T; LimitedArraySlice(T* begin, size_t num_elements) : current_(begin), begin_(begin), end_(begin + num_elements) {} int64_t EndDistance() const { return end_ - current_; } void push_back(T&& value) { if (EndDistance() > 0) *current_ = std::move(value); ++current_; } T& construct_at_end() { DCHECK_GT(EndDistance(), 0); return *(current_++); } T& back() { return *(current_ - 1); } size_t size() const { return std::min(current_ - begin_, end_ - begin_); } void resize(size_t size) { current_ = begin_ + size; } T* data() { return begin_; } private: T* current_; T* begin_; T* end_; }; template <typename A> auto EnableAliasing(A* a) -> decltype(a->EnableAliasing(true), void()) { a->EnableAliasing(true); } template <typename A> void EnableAliasing(A&& a) {} uint8 PeekTag(protobuf::io::CodedInputStream* stream) { DCHECK(stream != nullptr); const void* ptr; int size; if (!stream->GetDirectBufferPointer(&ptr, &size)) return 0; return *static_cast<const uint8*>(ptr); } constexpr uint8 kVarintTag(uint32 tag) { return (tag << 3) | 0; } constexpr uint8 kDelimitedTag(uint32 tag) { return (tag << 3) | 2; } constexpr uint8 kFixed32Tag(uint32 tag) { return (tag << 3) | 5; } namespace parsed { class Feature { public: Feature() = default; explicit Feature(StringPiece serialized) : serialized_(serialized) {} Status ParseDataType(DataType* dtype) { DCHECK(dtype != nullptr); if (serialized_.empty()) { *dtype = DT_INVALID; return absl::OkStatus(); } uint8 oneof_tag = static_cast<uint8>(*serialized_.data()); serialized_.remove_prefix(1); switch (oneof_tag) { case kDelimitedTag(1): *dtype = DT_STRING; break; case kDelimitedTag(2): *dtype = DT_FLOAT; break; case kDelimitedTag(3): *dtype = DT_INT64; break; default: *dtype = DT_INVALID; return errors::InvalidArgument("Unsupported datatype."); } return absl::OkStatus(); } bool GetNumElementsInBytesList(int* num_elements) { protobuf::io::CodedInputStream stream( reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size()); EnableAliasing(&stream); uint32 length = 0; if (!stream.ReadVarint32(&length)) return false; auto limit = stream.PushLimit(length); *num_elements = 0; while (!stream.ExpectAtEnd()) { if (!stream.ExpectTag(kDelimitedTag(1))) return false; uint32 bytes_length = 0; if 
(!stream.ReadVarint32(&bytes_length)) return false; if (!stream.Skip(bytes_length)) return false; ++*num_elements; } stream.PopLimit(limit); return true; } tstring* construct_at_end(LimitedArraySlice<tstring>* bytes_list) { if (bytes_list->EndDistance() <= 0) { return nullptr; } return &bytes_list->construct_at_end(); } tstring* construct_at_end(SmallVector<tstring>* bytes_list) { return &bytes_list->emplace_back(); } template <typename Result> bool ParseBytesList(Result* bytes_list) { DCHECK(bytes_list != nullptr); protobuf::io::CodedInputStream stream( reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size()); EnableAliasing(&stream); uint32 length; if (!stream.ReadVarint32(&length)) return false; auto limit = stream.PushLimit(length); while (!stream.ExpectAtEnd()) { if (!stream.ExpectTag(kDelimitedTag(1))) return false; uint32 bytes_length; if (!stream.ReadVarint32(&bytes_length)) return false; tstring* bytes = construct_at_end(bytes_list); if (bytes == nullptr) return false; bytes->resize_uninitialized(bytes_length); if (!stream.ReadRaw(bytes->data(), bytes_length)) return false; } stream.PopLimit(limit); return true; } template <typename Result> bool ParseFloatList(Result* float_list) { DCHECK(float_list != nullptr); protobuf::io::CodedInputStream stream( reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size()); EnableAliasing(&stream); uint32 length; if (!stream.ReadVarint32(&length)) return false; auto limit = stream.PushLimit(length); if (!stream.ExpectAtEnd()) { uint8 peek_tag = PeekTag(&stream); if (peek_tag != kDelimitedTag(1) && peek_tag != kFixed32Tag(1)) { return false; } constexpr int32_t kNumFloatBytes = 4; if (peek_tag == kDelimitedTag(1)) { if (!stream.ExpectTag(kDelimitedTag(1))) return false; uint32 packed_length; if (!stream.ReadVarint32(&packed_length)) return false; auto packed_limit = stream.PushLimit(packed_length); const size_t initial_size = float_list->size(); float_list->resize(initial_size + packed_length / kNumFloatBytes); if (port::kLittleEndian && sizeof(typename Result::value_type) == kNumFloatBytes) { const uint32 bytes_to_copy = std::min(static_cast<uint32>((float_list->size() - initial_size) * kNumFloatBytes), packed_length); if (!stream.ReadRaw(float_list->data() + initial_size, bytes_to_copy)) return false; } else { int64_t index = initial_size; while (!stream.ExpectAtEnd()) { uint32 buffer32; if (!stream.ReadLittleEndian32(&buffer32)) return false; if (index < float_list->size()) { float_list->data()[index] = absl::bit_cast<float>(buffer32); ++index; } } } stream.PopLimit(packed_limit); } else { const size_t initial_size = float_list->size(); const int64_t num_elements = stream.BytesUntilLimit() / (1 + kNumFloatBytes); float_list->resize(initial_size + num_elements); int64_t index = initial_size; while (!stream.ExpectAtEnd()) { if (!stream.ExpectTag(kFixed32Tag(1))) return false; uint32 buffer32; if (!stream.ReadLittleEndian32(&buffer32)) return false; float_list->data()[index] = absl::bit_cast<float>(buffer32); ++index; } } } stream.PopLimit(limit); return true; } template <typename Result> bool ParseInt64List(Result* int64_list) { DCHECK(int64_list != nullptr); protobuf::io::CodedInputStream stream( reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size()); EnableAliasing(&stream); uint32 length; if (!stream.ReadVarint32(&length)) return false; auto limit = stream.PushLimit(length); if (!stream.ExpectAtEnd()) { uint8 peek_tag = PeekTag(&stream); if (peek_tag != kDelimitedTag(1) && peek_tag != 
kVarintTag(1)) { return false; } if (peek_tag == kDelimitedTag(1)) { if (!stream.ExpectTag(kDelimitedTag(1))) return false; uint32 packed_length; if (!stream.ReadVarint32(&packed_length)) return false; auto packed_limit = stream.PushLimit(packed_length); while (!stream.ExpectAtEnd()) { protobuf_uint64 n; if (!stream.ReadVarint64(&n)) return false; int64_list->push_back(static_cast<int64_t>(n)); } stream.PopLimit(packed_limit); } else { while (!stream.ExpectAtEnd()) { if (!stream.ExpectTag(kVarintTag(1))) return false; protobuf_uint64 n; if (!stream.ReadVarint64(&n)) return false; int64_list->push_back(static_cast<int64_t>(n)); } } } stream.PopLimit(limit); return true; } StringPiece GetSerialized() const { return serialized_; } private: StringPiece serialized_; }; using FeatureMapEntry = std::pair<StringPiece, Feature>; using Example = std::vector<FeatureMapEntry>; } inline bool SkipExtraneousTag(protobuf::io::CodedInputStream* stream) { uint32 data; protobuf_uint64 dummy; switch (stream->ReadTag() & 0x7) { case 0: if (!stream->ReadVarint32(&data)) return false; return true; case 1: if (!stream->ReadLittleEndian64(&dummy)) return false; return true; case 2: if (!stream->ReadVarint32(&data)) return false; stream->Skip(data); return true; case 3: return false; case 4: return false; case 5: if (!stream->ReadLittleEndian32(&data)) return false; return true; } return false; } bool ParseString(protobuf::io::CodedInputStream* stream, StringPiece* result) { DCHECK(stream != nullptr); DCHECK(result != nullptr); uint32 length; if (!stream->ReadVarint32(&length)) return false; if (length == 0) { *result = StringPiece(nullptr, 0); return true; } const void* stream_alias; int stream_size; if (!stream->GetDirectBufferPointer(&stream_alias, &stream_size)) { return false; } if (static_cast<uint32>(stream_size) < length) return false; *result = StringPiece(static_cast<const char*>(stream_alias), length); stream->Skip(length); return true; } bool ParseFeatureMapEntry(protobuf::io::CodedInputStream* stream, parsed::FeatureMapEntry* feature_map_entry) { DCHECK(stream != nullptr); DCHECK(feature_map_entry != nullptr); uint32 length; if (!stream->ReadVarint32(&length)) return false; auto limit = stream->PushLimit(length); for (int n = 0; n < 2; ++n) { const uint32_t tag = stream->ReadTag(); switch (tag) { case kDelimitedTag(1): if (!ParseString(stream, &feature_map_entry->first)) return false; break; case kDelimitedTag(2): { StringPiece feature_string_piece; if (!ParseString(stream, &feature_string_piece)) return false; feature_map_entry->second = parsed::Feature(feature_string_piece); break; } default: return false; } } if (!stream->ExpectAtEnd()) return false; stream->PopLimit(limit); return true; } bool ParseFeatures(protobuf::io::CodedInputStream* stream, parsed::Example* example) { DCHECK(stream != nullptr); DCHECK(example != nullptr); uint32 length; if (!stream->ReadVarint32(&length)) return false; auto limit = stream->PushLimit(length); while (!stream->ExpectAtEnd()) { parsed::FeatureMapEntry feature_map_entry; if (!stream->ExpectTag(kDelimitedTag(1))) return false; if (!ParseFeatureMapEntry(stream, &feature_map_entry)) return false; example->push_back(std::move(feature_map_entry)); } stream->PopLimit(limit); return true; } bool ParseExample(protobuf::io::CodedInputStream* stream, parsed::Example* example) { DCHECK(stream != nullptr); DCHECK(example != nullptr); while (!stream->ExpectAtEnd()) { if (!stream->ExpectTag(kDelimitedTag(1))) { if (!SkipExtraneousTag(stream)) return false; } else { if 
(!ParseFeatures(stream, example)) return false; } } return true; } bool ParseExample(StringPiece serialized, parsed::Example* example) { DCHECK(example != nullptr); protobuf::io::CodedInputStream stream( reinterpret_cast<const uint8*>(serialized.data()), serialized.size()); EnableAliasing(&stream); return ParseExample(&stream, example); } } bool TestFastParse(const string& serialized, Example* example) { DCHECK(example != nullptr); parsed::Example parsed_example; if (!ParseExample(serialized, &parsed_example)) return false; auto& features = *example->mutable_features(); size_t parsed_example_size = parsed_example.size(); for (size_t i = 0; i < parsed_example_size; ++i) { parsed::FeatureMapEntry& name_and_feature = parsed_example[parsed_example_size - i - 1]; string name(name_and_feature.first); if ((*features.mutable_feature()).count(name) > 0) continue; auto& value = (*features.mutable_feature())[name]; DataType dtype; if (!name_and_feature.second.ParseDataType(&dtype).ok()) return false; switch (dtype) { case DT_INVALID: break; case DT_STRING: { SmallVector<tstring> list; if (!name_and_feature.second.ParseBytesList(&list)) return false; auto* result_list = value.mutable_bytes_list(); for (auto& bytes : list) { result_list->add_value(bytes.data(), bytes.size()); } break; } case DT_FLOAT: { SmallVector<float> list; if (!name_and_feature.second.ParseFloatList(&list)) return false; auto* result_list = value.mutable_float_list(); for (float f : list) { result_list->add_value(f); } break; } case DT_INT64: { SmallVector<int64_t> list; if (!name_and_feature.second.ParseInt64List(&list)) return false; auto* result_list = value.mutable_int64_list(); for (int64_t i : list) { result_list->add_value(i); } break; } default: LOG(FATAL) << "Should not happen."; } } return true; } namespace { using Config = FastParseExampleConfig; void ParallelFor(const std::function<void(size_t)>& f, size_t n, thread::ThreadPool* thread_pool) { if (n == 0) return; if (thread_pool == nullptr) { for (size_t i = 0; i < n; ++i) { f(i); } } else { BlockingCounter counter(n - 1); for (size_t i = 1; i < n; ++i) { thread_pool->Schedule([i, &f, &counter] { f(i); counter.DecrementCount(); }); } f(0); counter.Wait(); } } enum class Type { Dense, Sparse, Ragged }; struct SparseBuffer { SmallVector<tstring> bytes_list; SmallVector<float> float_list; SmallVector<int64_t> int64_list; std::vector<size_t> example_end_indices; }; struct SeededHasher { uint64 operator()(StringPiece s) const { return Hash64(s.data(), s.size(), seed); } uint64 seed{0xDECAFCAFFE}; }; void LogDenseFeatureDataLoss(StringPiece feature_name) { LOG(WARNING) << "Data loss! Feature '" << feature_name << "' is present in multiple concatenated " "tf.Examples. Ignoring all but last one."; static auto* duplicated_dense_feature = monitoring::Counter<0>::New( "/tensorflow/core/util/example_proto_fast_parsing/" "duplicated_dense_feature", "Dense feature appears twice in a tf.Example"); duplicated_dense_feature->GetCell()->IncrementBy(1); } void LogSparseFeatureDataLoss(StringPiece feature_name) { LOG(WARNING) << "Data loss! Feature '" << feature_name << "' is present in multiple concatenated " "tf.Examples. 
Ignoring all but last one."; static auto* duplicated_sparse_feature = monitoring::Counter<0>::New( "/tensorflow/core/util/example_proto_fast_parsing/" "duplicated_sparse_feature", "Sparse feature appears twice in a tf.Example"); duplicated_sparse_feature->GetCell()->IncrementBy(1); } Status FastParseSerializedExample( const tstring& serialized_example, const tstring& example_name, const size_t example_index, const Config& config, const PresizedCuckooMap<std::pair<size_t, Type>>& config_index, SeededHasher hasher, std::vector<Tensor>* output_dense, std::vector<SparseBuffer>* output_varlen_dense, std::vector<SparseBuffer>* output_sparse, std::vector<SparseBuffer>* output_ragged, PerExampleFeatureStats* output_stats) { DCHECK(output_dense != nullptr); DCHECK(output_sparse != nullptr); DCHECK(output_ragged != nullptr); parsed::Example parsed_example; if (!ParseExample(serialized_example, &parsed_example)) { return errors::InvalidArgument("Could not parse example input, value: '", serialized_example, "'"); } std::vector<int64_t> sparse_feature_last_example(config.sparse.size(), -1); std::vector<int64_t> dense_feature_last_example(config.dense.size(), -1); std::vector<int64_t> ragged_feature_last_example(config.ragged.size(), -1); const size_t parsed_example_size = parsed_example.size(); if (output_stats) { output_stats->features_count = parsed_example_size; } for (size_t i = 0; i < parsed_example_size; ++i) { parsed::FeatureMapEntry& name_and_feature = parsed_example[parsed_example_size - i - 1]; const StringPiece feature_name = name_and_feature.first; parsed::Feature& feature = name_and_feature.second; std::pair<size_t, Type> d_and_type; uint64 h = hasher(feature_name); if (!config_index.Find(h, &d_and_type)) continue; size_t d = d_and_type.first; bool is_dense = d_and_type.second == Type::Dense; bool is_ragged = d_and_type.second == Type::Ragged; { const tstring& config_feature_name = is_dense ? config.dense[d].feature_name : (is_ragged ? config.ragged[d].feature_name : config.sparse[d].feature_name); if (feature_name != config_feature_name) continue; } auto example_error = [&](StringPiece suffix) { return errors::InvalidArgument("Name: ", example_name, ", Key: ", feature_name, ", Index: ", example_index, ". ", suffix); }; auto parse_error = [&] { return example_error("Can't parse serialized Example."); }; DataType example_dtype; TF_RETURN_IF_ERROR(feature.ParseDataType(&example_dtype)); if (is_dense) { if (example_dtype == DT_INVALID) continue; if (dense_feature_last_example[d] == example_index) { LogDenseFeatureDataLoss(feature_name); continue; } dense_feature_last_example[d] = example_index; if (example_dtype != config.dense[d].dtype) { return example_error(strings::StrCat( "Data types don't match. Data type: ", DataTypeString(example_dtype), " but expected type: ", DataTypeString(config.dense[d].dtype))); } if (!config.dense[d].variable_length) { Tensor& out = (*output_dense)[d]; const std::size_t num_elements = config.dense[d].elements_per_stride; if (output_stats) { output_stats->feature_values_count += num_elements; } const std::size_t offset = example_index * num_elements; auto shape_error = [&](size_t size, StringPiece type_str) { return example_error(strings::StrCat( "Number of ", type_str, " values != expected. 
" "Values size: ", size, " but output shape: ", config.dense[d].shape.DebugString())); }; switch (config.dense[d].dtype) { case DT_INT64: { auto out_p = out.flat<int64_t>().data() + offset; LimitedArraySlice<int64_t> slice(out_p, num_elements); if (!feature.ParseInt64List(&slice)) return parse_error(); if (slice.EndDistance() != 0) { return shape_error(num_elements - slice.EndDistance(), "int64"); } break; } case DT_FLOAT: { auto out_p = out.flat<float>().data() + offset; LimitedArraySlice<float> slice(out_p, num_elements); if (!feature.ParseFloatList(&slice)) return parse_error(); if (slice.EndDistance() != 0) { return shape_error(num_elements - slice.EndDistance(), "float"); } break; } case DT_STRING: { auto out_p = out.flat<tstring>().data() + offset; LimitedArraySlice<tstring> slice(out_p, num_elements); if (!feature.ParseBytesList(&slice)) return parse_error(); if (slice.EndDistance() != 0) { return shape_error(num_elements - slice.EndDistance(), "bytes"); } break; } default: LOG(FATAL) << "Should not happen."; } } else { SparseBuffer& out = (*output_varlen_dense)[d]; const std::size_t num_elements = config.dense[d].elements_per_stride; if (example_dtype != DT_INVALID && example_dtype != config.dense[d].dtype) { return example_error(strings::StrCat( "Data types don't match. ", "Expected type: ", DataTypeString(config.dense[d].dtype))); } auto shape_error = [&](size_t size, StringPiece type_str) { return example_error(strings::StrCat( "Number of ", type_str, " values is not a multiple of stride length. Saw ", size, " values but output shape is: ", config.dense[d].shape.DebugString())); }; switch (config.dense[d].dtype) { case DT_INT64: { if (example_dtype != DT_INVALID) { if (!feature.ParseInt64List(&out.int64_list)) { return parse_error(); } if (out.int64_list.size() % num_elements != 0) { return shape_error(out.int64_list.size(), "int64"); } } out.example_end_indices.push_back(out.int64_list.size()); break; } case DT_FLOAT: { if (example_dtype != DT_INVALID) { if (!feature.ParseFloatList(&out.float_list)) { return parse_error(); } if (out.float_list.size() % num_elements != 0) { return shape_error(out.float_list.size(), "float"); } } out.example_end_indices.push_back(out.float_list.size()); break; } case DT_STRING: { if (example_dtype != DT_INVALID) { if (!feature.ParseBytesList(&out.bytes_list)) { return parse_error(); } if (out.bytes_list.size() % num_elements != 0) { return shape_error(out.bytes_list.size(), "bytes"); } } out.example_end_indices.push_back(out.bytes_list.size()); break; } default: LOG(FATAL) << "Should not happen."; } if (output_stats) { const size_t out_examples_count = out.example_end_indices.size(); if (out_examples_count == 1) { output_stats->feature_values_count += out.example_end_indices[0]; } else { output_stats->feature_values_count += out.example_end_indices[out_examples_count - 1] - out.example_end_indices[out_examples_count - 2]; } } } } else { auto& last_example = is_ragged ? ragged_feature_last_example : sparse_feature_last_example; if (last_example[d] == example_index) { LogSparseFeatureDataLoss(feature_name); continue; } last_example[d] = example_index; SparseBuffer& out = is_ragged ? (*output_ragged)[d] : (*output_sparse)[d]; DataType feature_dtype = is_ragged ? config.ragged[d].dtype : config.sparse[d].dtype; if (example_dtype != DT_INVALID && example_dtype != feature_dtype) { return example_error( strings::StrCat("Data types don't match. 
", "Expected type: ", DataTypeString(feature_dtype), ", Actual type: ", DataTypeString(example_dtype))); } switch (feature_dtype) { case DT_INT64: { if (example_dtype != DT_INVALID) { if (!feature.ParseInt64List(&out.int64_list)) { return parse_error(); } } out.example_end_indices.push_back(out.int64_list.size()); break; } case DT_FLOAT: { if (example_dtype != DT_INVALID) { if (!feature.ParseFloatList(&out.float_list)) { return parse_error(); } } out.example_end_indices.push_back(out.float_list.size()); break; } case DT_STRING: { if (example_dtype != DT_INVALID) { if (!feature.ParseBytesList(&out.bytes_list)) { return parse_error(); } } out.example_end_indices.push_back(out.bytes_list.size()); break; } default: LOG(FATAL) << "Should not happen."; } if (output_stats) { const size_t out_examples_count = out.example_end_indices.size(); if (out_examples_count == 1) { output_stats->feature_values_count += out.example_end_indices[0]; } else { output_stats->feature_values_count += out.example_end_indices[out_examples_count - 1] - out.example_end_indices[out_examples_count - 2]; } } } } for (size_t d = 0; d < config.dense.size(); ++d) { if (config.dense[d].variable_length) continue; if (dense_feature_last_example[d] == example_index) continue; if (config.dense[d].default_value.NumElements() == 0) { return errors::InvalidArgument( "Name: ", example_name, ", Feature: ", config.dense[d].feature_name, " (data type: ", DataTypeString(config.dense[d].dtype), ")", " is required but could not be found."); } const Tensor& in = config.dense[d].default_value; Tensor& out = (*output_dense)[d]; const std::size_t num_elements = in.shape().num_elements(); const std::size_t offset = example_index * num_elements; switch (config.dense[d].dtype) { case DT_INT64: { std::copy_n(in.flat<int64_t>().data(), num_elements, out.flat<int64_t>().data() + offset); break; }
#include "tensorflow/core/util/example_proto_fast_parsing.h" #include <unordered_set> #include <utility> #include <vector> #include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/feature.pb.h" #include "tensorflow/core/lib/random/philox_random.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/util/example_proto_fast_parsing_test.pb.h" namespace tensorflow { namespace example { namespace { constexpr char kDenseInt64Key[] = "dense_int64"; constexpr char kDenseFloatKey[] = "dense_float"; constexpr char kDenseStringKey[] = "dense_string"; constexpr char kSparseInt64Key[] = "sparse_int64"; constexpr char kSparseFloatKey[] = "sparse_float"; constexpr char kSparseStringKey[] = "sparse_string"; string SerializedToReadable(string serialized) { string result; result += '"'; for (char c : serialized) result += strings::StrCat("\\x", strings::Hex(c, strings::kZeroPad2)); result += '"'; return result; } template <class T> string Serialize(const T& example) { string serialized; example.SerializeToString(&serialized); return serialized; } void TestCorrectness(const string& serialized) { Example example; Example fast_example; EXPECT_TRUE(example.ParseFromString(serialized)); example.DiscardUnknownFields(); EXPECT_TRUE(TestFastParse(serialized, &fast_example)); EXPECT_EQ(example.DebugString(), fast_example.DebugString()); if (example.DebugString() != fast_example.DebugString()) { LOG(ERROR) << "Bad serialized: " << SerializedToReadable(serialized); } } TEST(FastParse, IgnoresPrecedingUnknownTopLevelFields) { ExampleWithExtras example; (*example.mutable_features()->mutable_feature())["age"] .mutable_int64_list() ->add_value(13); example.set_extra1("some_str"); example.set_extra2(123); example.set_extra3(234); example.set_extra4(345); example.set_extra5(4.56); example.add_extra6(5.67); example.add_extra6(6.78); (*example.mutable_extra7()->mutable_feature())["extra7"] .mutable_int64_list() ->add_value(1337); Example context; (*context.mutable_features()->mutable_feature())["zipcode"] .mutable_int64_list() ->add_value(94043); TestCorrectness(strings::StrCat(Serialize(example), Serialize(context))); } TEST(FastParse, IgnoresTrailingUnknownTopLevelFields) { Example example; (*example.mutable_features()->mutable_feature())["age"] .mutable_int64_list() ->add_value(13); ExampleWithExtras context; (*context.mutable_features()->mutable_feature())["zipcode"] .mutable_int64_list() ->add_value(94043); context.set_extra1("some_str"); context.set_extra2(123); context.set_extra3(234); context.set_extra4(345); context.set_extra5(4.56); context.add_extra6(5.67); context.add_extra6(6.78); (*context.mutable_extra7()->mutable_feature())["extra7"] .mutable_int64_list() ->add_value(1337); TestCorrectness(strings::StrCat(Serialize(example), Serialize(context))); } TEST(FastParse, SingleInt64WithContext) { Example example; (*example.mutable_features()->mutable_feature())["age"] .mutable_int64_list() ->add_value(13); Example context; (*context.mutable_features()->mutable_feature())["zipcode"] .mutable_int64_list() ->add_value(94043); TestCorrectness(strings::StrCat(Serialize(example), Serialize(context))); } TEST(FastParse, DenseInt64WithContext) { Example example; (*example.mutable_features()->mutable_feature())["age"] .mutable_int64_list() ->add_value(0); Example context; (*context.mutable_features()->mutable_feature())["age"] 
.mutable_int64_list() ->add_value(15); string serialized = Serialize(example) + Serialize(context); { Example deserialized; EXPECT_TRUE(deserialized.ParseFromString(serialized)); EXPECT_EQ(deserialized.DebugString(), context.DebugString()); } TestCorrectness(serialized); } TEST(FastParse, NonPacked) { TestCorrectness( "\x0a\x0e\x0a\x0c\x0a\x03\x61\x67\x65\x12\x05\x1a\x03\x0a\x01\x0d"); } TEST(FastParse, Packed) { TestCorrectness( "\x0a\x0d\x0a\x0b\x0a\x03\x61\x67\x65\x12\x04\x1a\x02\x08\x0d"); } TEST(FastParse, ValueBeforeKeyInMap) { TestCorrectness("\x0a\x12\x0a\x10\x12\x09\x0a\x07\x0a\x05value\x0a\x03key"); } TEST(FastParse, EmptyFeatures) { Example example; example.mutable_features(); TestCorrectness(Serialize(example)); } void TestCorrectnessJson(const string& json) { auto resolver = protobuf::util::NewTypeResolverForDescriptorPool( "type.googleapis.com", protobuf::DescriptorPool::generated_pool()); string serialized; auto s = protobuf::util::JsonToBinaryString( resolver, "type.googleapis.com/tensorflow.Example", json, &serialized); EXPECT_TRUE(s.ok()) << s; delete resolver; TestCorrectness(serialized); } TEST(FastParse, JsonUnivalent) { TestCorrectnessJson( "{'features': {" " 'feature': {'age': {'int64_list': {'value': [0]} }}, " " 'feature': {'flo': {'float_list': {'value': [1.1]} }}, " " 'feature': {'byt': {'bytes_list': {'value': ['WW8='] }}}" "}}"); } TEST(FastParse, JsonMultivalent) { TestCorrectnessJson( "{'features': {" " 'feature': {'age': {'int64_list': {'value': [0, 13, 23]} }}, " " 'feature': {'flo': {'float_list': {'value': [1.1, 1.2, 1.3]} }}, " " 'feature': {'byt': {'bytes_list': {'value': ['WW8=', 'WW8K'] }}}" "}}"); } TEST(FastParse, SingleInt64) { Example example; (*example.mutable_features()->mutable_feature())["age"] .mutable_int64_list() ->add_value(13); TestCorrectness(Serialize(example)); } static string ExampleWithSomeFeatures() { Example example; (*example.mutable_features()->mutable_feature())[""]; (*example.mutable_features()->mutable_feature())["empty_bytes_list"] .mutable_bytes_list(); (*example.mutable_features()->mutable_feature())["empty_float_list"] .mutable_float_list(); (*example.mutable_features()->mutable_feature())["empty_int64_list"] .mutable_int64_list(); BytesList* bytes_list = (*example.mutable_features()->mutable_feature())["bytes_list"] .mutable_bytes_list(); bytes_list->add_value("bytes1"); bytes_list->add_value("bytes2"); FloatList* float_list = (*example.mutable_features()->mutable_feature())["float_list"] .mutable_float_list(); float_list->add_value(1.0); float_list->add_value(2.0); Int64List* int64_list = (*example.mutable_features()->mutable_feature())["int64_list"] .mutable_int64_list(); int64_list->add_value(3); int64_list->add_value(270); int64_list->add_value(86942); return Serialize(example); } TEST(FastParse, SomeFeatures) { TestCorrectness(ExampleWithSomeFeatures()); } static void AddDenseFeature(const char* feature_name, DataType dtype, PartialTensorShape shape, bool variable_length, size_t elements_per_stride, FastParseExampleConfig* out_config) { out_config->dense.emplace_back(); auto& new_feature = out_config->dense.back(); new_feature.feature_name = feature_name; new_feature.dtype = dtype; new_feature.shape = std::move(shape); new_feature.default_value = Tensor(dtype, {}); new_feature.variable_length = variable_length; new_feature.elements_per_stride = elements_per_stride; } static void AddSparseFeature(const char* feature_name, DataType dtype, FastParseExampleConfig* out_config) { out_config->sparse.emplace_back(); auto& 
new_feature = out_config->sparse.back(); new_feature.feature_name = feature_name; new_feature.dtype = dtype; } TEST(FastParse, StatsCollection) { const size_t kNumExamples = 13; std::vector<tstring> serialized(kNumExamples, ExampleWithSomeFeatures()); FastParseExampleConfig config_dense; AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_dense); AddDenseFeature("float_list", DT_FLOAT, {2}, false, 2, &config_dense); AddDenseFeature("int64_list", DT_INT64, {3}, false, 3, &config_dense); config_dense.collect_feature_stats = true; FastParseExampleConfig config_varlen; AddDenseFeature("bytes_list", DT_STRING, {-1}, true, 1, &config_varlen); AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_varlen); AddDenseFeature("int64_list", DT_INT64, {-1}, true, 1, &config_varlen); config_varlen.collect_feature_stats = true; FastParseExampleConfig config_sparse; AddSparseFeature("bytes_list", DT_STRING, &config_sparse); AddSparseFeature("float_list", DT_FLOAT, &config_sparse); AddSparseFeature("int64_list", DT_INT64, &config_sparse); config_sparse.collect_feature_stats = true; FastParseExampleConfig config_mixed; AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_mixed); AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_mixed); AddSparseFeature("int64_list", DT_INT64, &config_mixed); config_mixed.collect_feature_stats = true; for (const FastParseExampleConfig& config : {config_dense, config_varlen, config_sparse, config_mixed}) { { Result result; TF_CHECK_OK(FastParseExample(config, serialized, {}, nullptr, &result)); EXPECT_EQ(kNumExamples, result.feature_stats.size()); for (const PerExampleFeatureStats& stats : result.feature_stats) { EXPECT_EQ(7, stats.features_count); EXPECT_EQ(7, stats.feature_values_count); } } { Result result; TF_CHECK_OK(FastParseSingleExample(config, serialized[0], &result)); EXPECT_EQ(1, result.feature_stats.size()); EXPECT_EQ(7, result.feature_stats[0].features_count); EXPECT_EQ(7, result.feature_stats[0].feature_values_count); } } } string RandStr(random::SimplePhilox* rng) { static const char key_char_lookup[] = "0123456789{}~`!@#$%^&*()" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; auto len = 1 + rng->Rand32() % 200; string str; str.reserve(len); while (len-- > 0) { str.push_back( key_char_lookup[rng->Rand32() % (sizeof(key_char_lookup) / sizeof(key_char_lookup[0]))]); } return str; } void Fuzz(random::SimplePhilox* rng) { auto num_keys = 1 + rng->Rand32() % 100; std::unordered_set<string> unique_keys; for (auto i = 0; i < num_keys; ++i) { unique_keys.emplace(RandStr(rng)); } Example example; string serialized_example; auto num_concats = 1 + rng->Rand32() % 4; std::vector<Feature::KindCase> feat_types( {Feature::kBytesList, Feature::kFloatList, Feature::kInt64List}); std::vector<string> all_keys(unique_keys.begin(), unique_keys.end()); while (num_concats--) { example.Clear(); auto num_active_keys = 1 + rng->Rand32() % all_keys.size(); for (auto i = 0; i < num_active_keys; ++i) { auto fkey = all_keys[rng->Rand32() % all_keys.size()]; auto ftype_idx = rng->Rand32() % feat_types.size(); auto num_features = 1 + rng->Rand32() % 5; switch (static_cast<Feature::KindCase>(feat_types[ftype_idx])) { case Feature::kBytesList: { BytesList* bytes_list = (*example.mutable_features()->mutable_feature())[fkey] .mutable_bytes_list(); while (num_features--) { bytes_list->add_value(RandStr(rng)); } break; } case Feature::kFloatList: { FloatList* float_list = (*example.mutable_features()->mutable_feature())[fkey] 
.mutable_float_list(); while (num_features--) { float_list->add_value(rng->RandFloat()); } break; } case Feature::kInt64List: { Int64List* int64_list = (*example.mutable_features()->mutable_feature())[fkey] .mutable_int64_list(); while (num_features--) { int64_list->add_value(rng->Rand64()); } break; } default: { LOG(QFATAL); break; } } } serialized_example += example.SerializeAsString(); } TestCorrectness(serialized_example); } TEST(FastParse, FuzzTest) { const uint64 seed = 1337; random::PhiloxRandom philox(seed); random::SimplePhilox rng(&philox); auto num_runs = 200; while (num_runs--) { LOG(INFO) << "runs left: " << num_runs; Fuzz(&rng); } } TEST(TestFastParseExample, Empty) { Result result; FastParseExampleConfig config; config.sparse.push_back({"test", DT_STRING}); Status status = FastParseExample(config, absl::Span<const tstring>(), absl::Span<const tstring>(), nullptr, &result); EXPECT_TRUE(status.ok()) << status; } } } }
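Several tests above (the *WithContext variants and Fuzz) lean on a protobuf wire-format property: concatenating two serialized messages parses as their merge, and for map-valued fields such as the Features feature map a duplicated key keeps the last entry seen. A minimal sketch of the behaviour DenseInt64WithContext asserts:

// Sketch: concatenated wire bytes parse as a merge; for the feature map,
// the later "age" entry replaces the earlier one.
Example a, b, merged;
(*a.mutable_features()->mutable_feature())["age"]
    .mutable_int64_list()
    ->add_value(0);
(*b.mutable_features()->mutable_feature())["age"]
    .mutable_int64_list()
    ->add_value(15);
merged.ParseFromString(a.SerializeAsString() + b.SerializeAsString());
// merged now holds age == [15], matching b's DebugString().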
bool ParseExample(StringPiece serialized, parsed::Example* example) {
  DCHECK(example != nullptr);
  protobuf::io::CodedInputStream stream(
      reinterpret_cast<const uint8*>(serialized.data()), serialized.size());
  EnableAliasing(&stream);
  return ParseExample(&stream, example);
}
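One detail worth calling out: EnableAliasing lets the parsed fields point directly into the serialized buffer instead of copying, so the buffer must outlive the parsed::Example. A hedged usage sketch (the helper name is illustrative, not from this file):

// Sketch: `serialized` must stay alive for as long as `example` is used,
// because aliasing makes the parsed string fields point into it.
void ParseAndInspect(const tstring& serialized) {
  parsed::Example example;
  if (!ParseExample(StringPiece(serialized), &example)) return;
  // ... read from `example` here, while `serialized` is still in scope ...
}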
TEST(TestFastParseExample, Empty) {
  Result result;
  FastParseExampleConfig config;
  config.sparse.push_back({"test", DT_STRING});
  Status status =
      FastParseExample(config, absl::Span<const tstring>(),
                       absl::Span<const tstring>(), nullptr, &result);
  EXPECT_TRUE(status.ok()) << status;
}
#include "tensorstore/internal/oauth2/oauth_utils.h" #include <stddef.h> #include <memory> #include <optional> #include <utility> #include "absl/status/status.h" #include "absl/strings/escaping.h" #include "absl/time/time.h" #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> #include <openssl/rsa.h> #include "tensorstore/internal/json_binding/bindable.h" #include "tensorstore/internal/json_binding/json_binding.h" #include "tensorstore/util/result.h" #include "tensorstore/util/str_cat.h" namespace jb = tensorstore::internal_json_binding; namespace tensorstore { namespace { constexpr char kCryptoAlgorithm[] = "RS256"; constexpr char kJwtType[] = "JWT"; constexpr char kGrantType[] = "urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer"; } namespace internal_oauth2 { Result<std::string> SignWithRSA256(std::string_view private_key, std::string_view to_sign) { if (private_key.empty()) { return absl::InternalError("No private key provided."); } const auto md = EVP_sha256(); assert(md != nullptr); auto md_ctx = std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)>( EVP_MD_CTX_create(), &EVP_MD_CTX_free); assert(md_ctx != nullptr); auto pem_buffer = std::unique_ptr<BIO, decltype(&BIO_free)>( BIO_new_mem_buf(static_cast<const char*>(private_key.data()), static_cast<int>(private_key.length())), &BIO_free); if (!pem_buffer) { return absl::InternalError("Could not create the PEM buffer."); } auto key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>( PEM_read_bio_PrivateKey( static_cast<BIO*>(pem_buffer.get()), nullptr, nullptr, nullptr), &EVP_PKEY_free); if (!key) { return absl::InternalError("Could not load the private key."); } if (EVP_DigestSignInit(md_ctx.get(), nullptr, md, nullptr, key.get()) != 1) { return absl::InternalError("DigestInit failed."); } if (EVP_DigestSignUpdate(md_ctx.get(), to_sign.data(), to_sign.size()) != 1) { return absl::InternalError("DigestUpdate failed."); } size_t sig_len = 0; if (EVP_DigestSignFinal(md_ctx.get(), nullptr, &sig_len) != 1) { return absl::InternalError("DigestFinal (get signature length) failed."); } std::unique_ptr<unsigned char[]> sig(new unsigned char[sig_len]); if (EVP_DigestSignFinal(md_ctx.get(), sig.get(), &sig_len) != 1) { return absl::InternalError("DigestFinal (signature compute) failed."); } std::string signature; absl::WebSafeBase64Escape( std::string_view(reinterpret_cast<char*>(sig.get()), sig_len), &signature); return std::move(signature); } std::string BuildJWTHeader(std::string_view key_id) { ::nlohmann::json assertion_header = { {"alg", kCryptoAlgorithm}, {"typ", kJwtType}, {"kid", std::string(key_id)}, }; std::string encoded_header; absl::WebSafeBase64Escape(assertion_header.dump(), &encoded_header); return encoded_header; } std::string BuildJWTClaimBody(std::string_view client_email, std::string_view scope, std::string_view audience, absl::Time now, std::int64_t lifetime) { const std::int64_t request_timestamp_sec = absl::ToUnixSeconds(now); const std::int64_t expiration_timestamp_sec = request_timestamp_sec + lifetime; ::nlohmann::json assertion_payload = { {"iss", std::string(client_email)}, {"scope", std::string(scope)}, {"aud", std::string(audience)}, {"iat", request_timestamp_sec}, {"exp", expiration_timestamp_sec}, }; std::string encoded_payload; absl::WebSafeBase64Escape(assertion_payload.dump(), &encoded_payload); return encoded_payload; } Result<std::string> BuildSignedJWTRequest(std::string_view private_key, std::string_view header, std::string_view body) { auto claim = 
tensorstore::StrCat(header, ".", body); auto result = SignWithRSA256(private_key, claim); if (!result) { return result.status(); } return tensorstore::StrCat("grant_type=", kGrantType, "&assertion=", claim, ".", *result); } constexpr static auto ErrorResponseBinder = jb::Object( jb::Member("error", jb::Projection(&ErrorResponse::error, jb::NonEmptyStringBinder)), jb::Member("error_description", jb::Projection(&ErrorResponse::error_description, jb::NonEmptyStringBinder)), jb::Member("error_uri", jb::Projection(&ErrorResponse::error_uri, jb::NonEmptyStringBinder)), jb::Member("error_subtype", jb::Projection(&ErrorResponse::error_subtype, jb::NonEmptyStringBinder)), jb::DiscardExtraMembers); Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error) { if (error.is_discarded()) { return absl::InvalidArgumentError("Invalid ErrorResponse"); } return jb::FromJson<ErrorResponse>(error, ErrorResponseBinder); } constexpr static auto GoogleServiceAccountCredentialsBinder = jb::Object( jb::Member("private_key", jb::Projection(&GoogleServiceAccountCredentials::private_key, jb::NonEmptyStringBinder)), jb::Member("private_key_id", jb::Projection(&GoogleServiceAccountCredentials::private_key_id, jb::NonEmptyStringBinder)), jb::Member("client_email", jb::Projection(&GoogleServiceAccountCredentials::client_email, jb::NonEmptyStringBinder)), jb::Member("token_uri", jb::Projection(&GoogleServiceAccountCredentials::token_uri, jb::DefaultInitializedValue())), jb::DiscardExtraMembers); Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials) { if (credentials.is_discarded()) { return absl::InvalidArgumentError( "Invalid GoogleServiceAccountCredentials token"); } auto creds_token = jb::FromJson<GoogleServiceAccountCredentials>( credentials, GoogleServiceAccountCredentialsBinder); if (!creds_token.ok()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Invalid GoogleServiceAccountCredentials: ", creds_token.status())); } return creds_token; } Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials( std::string_view source) { auto credentials = internal::ParseJson(source); if (credentials.is_discarded()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Invalid GoogleServiceAccountCredentials: ", source)); } return ParseGoogleServiceAccountCredentialsImpl(credentials); } constexpr static auto RefreshTokenBinder = jb::Object( jb::Member("client_id", jb::Projection(&RefreshToken::client_id, jb::NonEmptyStringBinder)), jb::Member("client_secret", jb::Projection(&RefreshToken::client_secret, jb::NonEmptyStringBinder)), jb::Member("refresh_token", jb::Projection(&RefreshToken::refresh_token, jb::NonEmptyStringBinder)), jb::DiscardExtraMembers); Result<RefreshToken> ParseRefreshTokenImpl( const ::nlohmann::json& credentials) { if (credentials.is_discarded()) { return absl::UnauthenticatedError("Invalid RefreshToken token"); } auto refresh_token = jb::FromJson<RefreshToken>(credentials, RefreshTokenBinder); if (!refresh_token.ok()) { return absl::UnauthenticatedError( tensorstore::StrCat("Invalid RefreshToken: ", credentials.dump())); } return refresh_token; } Result<RefreshToken> ParseRefreshToken(std::string_view source) { auto credentials = internal::ParseJson(source); if (credentials.is_discarded()) { return absl::UnauthenticatedError( tensorstore::StrCat("Invalid RefreshToken: ", source)); } return ParseRefreshTokenImpl(credentials); } constexpr static auto OAuthResponseBinder = jb::Object( 
jb::Member("token_type", jb::Projection(&OAuthResponse::token_type, jb::NonEmptyStringBinder)), jb::Member("access_token", jb::Projection(&OAuthResponse::access_token, jb::NonEmptyStringBinder)), jb::Member("expires_in", jb::Projection(&OAuthResponse::expires_in, jb::LooseInteger<int64_t>(1))), jb::DiscardExtraMembers); Result<OAuthResponse> ParseOAuthResponseImpl( const ::nlohmann::json& credentials) { if (credentials.is_discarded()) { return absl::UnauthenticatedError("Invalid OAuthResponse token"); } auto response_token = jb::FromJson<OAuthResponse>(credentials, OAuthResponseBinder); if (!response_token.ok()) { return absl::UnauthenticatedError( tensorstore::StrCat("Invalid OAuthResponse: ", credentials.dump())); } return response_token; } Result<OAuthResponse> ParseOAuthResponse(std::string_view source) { auto credentials = internal::ParseJson(source); if (credentials.is_discarded()) { return absl::UnauthenticatedError( tensorstore::StrCat("Invalid OAuthResponse: ", source)); } return ParseOAuthResponseImpl(credentials); } } }
#include "tensorstore/internal/oauth2/oauth_utils.h" #include <gtest/gtest.h> #include "absl/strings/escaping.h" #include "tensorstore/internal/json_gtest.h" #include "tensorstore/internal/oauth2/fake_private_key.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace { using ::tensorstore::internal_oauth2::GetFakePrivateKey; using ::tensorstore::internal_oauth2::ParseGoogleServiceAccountCredentials; using ::tensorstore::internal_oauth2::ParseOAuthResponse; using ::tensorstore::internal_oauth2::ParseRefreshToken; std::string GetJsonKeyFileContents() { constexpr char kJsonKeyfilePrefix[] = R"""({ "type": "service_account", "project_id": "foo-project", "private_key_id": "a1a111aa1111a11a11a11aa111a111a1a1111111", "client_email": "foo-email@foo-project.iam.gserviceaccount.com", "client_id": "100000000000000000001", "auth_uri": "https: "token_uri": "https: "auth_provider_x509_cert_url": "https: "client_x509_cert_url": "https: )"""; return tensorstore::StrCat(kJsonKeyfilePrefix, " \"private_key\": \"", absl::CEscape(GetFakePrivateKey()), "\" }"); } TEST(OAuthUtilTest, GoogleServiceAccountCredentials_Invalid) { EXPECT_FALSE(ParseGoogleServiceAccountCredentials("{ }").ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "", "private_key_id": "", "client_email": "", "token_uri": "" })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "", "private_key_id": "abc", "client_email": "456" })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "", "client_email": "456" })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "abc", "client_email": "" })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "abc", "client_email": "456" "token_uri": "" })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key_id": "abc", "client_email": "456", })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "client_email": "456", })") .ok()); EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "abc", })") .ok()); } TEST(OAuthUtilTest, GoogleServiceAccountCredentials) { auto result = ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "abc", "client_email": "456", "token_uri": "wxy" })"); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("123", result.value().private_key); EXPECT_EQ("abc", result.value().private_key_id); EXPECT_EQ("456", result.value().client_email); EXPECT_EQ("wxy", result.value().token_uri); result = ParseGoogleServiceAccountCredentials(R"({ "private_key" : "123", "private_key_id": "abc", "client_email": "456" })"); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("123", result.value().private_key); EXPECT_EQ("abc", result.value().private_key_id); EXPECT_EQ("456", result.value().client_email); EXPECT_EQ("", result.value().token_uri); } TEST(OAuthUtilTest, GoogleServiceAccountCredentialsFile) { auto result = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents()); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("foo-email@foo-project.iam.gserviceaccount.com", result->client_email); } TEST(OAuthUtilTest, ParseRefreshToken_Invalid) { EXPECT_FALSE(ParseRefreshToken("{ }").ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "", "client_secret": "", "refresh_token": "" })") .ok()); 
EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "", "client_secret": "abc", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": "" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": 456 })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_secret": "abc", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"json({ "error": "invalid_grant", "error_description": "reauth related error (invalid_rapt)", "error_uri": "https: "error_subtype": "invalid_rapt" })json") .ok()); } TEST(OAuthUtilTest, ParseRefreshToken) { auto result = ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": "456" })"); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("123", result.value().client_id); EXPECT_EQ("abc", result.value().client_secret); EXPECT_EQ("456", result.value().refresh_token); } TEST(OAuthUtilTest, ParseOAuthResponse_Invalid) { EXPECT_FALSE(ParseOAuthResponse("{ }").ok()); EXPECT_FALSE(ParseOAuthResponse(R"json({ "token_type" : "", "access_token": "abc", "expires_in": 456 })json") .ok()); EXPECT_FALSE(ParseOAuthResponse(R"json({ "token_type" : "123", "access_token": "", "expires_in": 456 })json") .ok()); EXPECT_FALSE(ParseOAuthResponse(R"json({ "token_type" : "123", "access_token": "abc", })json") .ok()); EXPECT_FALSE(ParseOAuthResponse(R"json({ "error": "invalid_grant", "error_description": "reauth related error (invalid_rapt)", "error_uri": "https: "error_subtype": "invalid_rapt" })json") .ok()); } TEST(OAuthUtilTest, ParseOAuthResponse) { EXPECT_TRUE(ParseOAuthResponse(R"({ "token_type" : "123", "access_token": "abc", "expires_in": "456" })") .ok()); auto result = ParseOAuthResponse(R"({ "token_type" : "123", "access_token": "abc", "expires_in": 456 })"); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("123", result.value().token_type); EXPECT_EQ("abc", result.value().access_token); EXPECT_EQ(456, result.value().expires_in); result = ParseOAuthResponse(R"({ "token_type" : "123", "access_token": "abc", "expires_in": 456, "extra_fields": "are ignored" })"); ASSERT_TRUE(result.ok()) << result.status(); } TEST(OAuthUtilTest, BuildJWTClaimTest) { using ::tensorstore::internal_oauth2::BuildJWTClaimBody; using ::tensorstore::internal_oauth2::BuildJWTHeader; EXPECT_EQ("eyJhbGciOiJSUzI1NiIsImtpZCI6ImEiLCJ0eXAiOiJKV1QifQ", BuildJWTHeader("a")); EXPECT_EQ( "eyJhdWQiOiI0IiwiZXhwIjoxNTQ3NjY5NzAzLCJpYXQiOjE1NDc2NjYxMDMsImlzcyI6ImIi" "LCJzY29wZSI6ImMifQ", BuildJWTClaimBody("b", "c", "4", absl::FromUnixSeconds(1547666103), 3600)); } TEST(OAuthUtilTest, Sign) { using ::tensorstore::internal_oauth2::SignWithRSA256; { auto result = SignWithRSA256("", "something"); EXPECT_FALSE(result.ok()); } { constexpr char kBadKey[] = "-----BEGIN PRIVATE KEY-----\n" "Z23x2ZUyar6i0BQ8eJFAEN+IiUapEeCVazuxJSt4RjYfwSa/" "p117jdZGEWD0GxMC\nlUtj+/nH3HDQjM4ltYfTPUg=\n" "-----END PRIVATE KEY-----\n"; auto result = SignWithRSA256(kBadKey, "something"); EXPECT_FALSE(result.ok()); } auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents()); ASSERT_TRUE(creds.ok()); { auto result = SignWithRSA256(creds->private_key, 
"something"); ASSERT_TRUE(result.ok()); EXPECT_EQ( "A-sH4BVqtxu-6LECWJCb0VKGDj46pnpBpZB1KViuhG2CwugRVR6V3-" "w8eBvAUbIRewSnXp_lWkxdy_rZBMau9VuILnLOC0t692-" "L8WEqHsoFYBWvTZGCT5XkslVXhxt4d8jgM6U_8If4Cf3fGA4XAxpP-pyrbPGz-" "VXn6R7jcLGOLsFtcuAXpJ9zkwYE72pGUtI_hiU-" "tquIEayOQW9frXJlxt2oR4ld1l3p0FWibkNY8OfYPdTlRS0WcsgpWngTamHEBplJ5xNLD5" "Ye5bG1DFqBJn0evxW0btbcfKCYuyirvgvHPsTt-" "YMcPGo1xtlhT5c4ycEHOObFUGDpKPjljw", *result); } } TEST(OAuthUtilTest, BuildJWTRequestBody) { using ::tensorstore::internal_oauth2::BuildSignedJWTRequest; auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents()); ASSERT_TRUE(creds.ok()); auto result = BuildSignedJWTRequest(creds->private_key, "header", "something"); ASSERT_TRUE(result.ok()); EXPECT_EQ( "grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&" "assertion=header.something.LyvY9ZVG6tL34g5Wji--3G5JGQP-" "fza47yBQIrRHJqecVUTVGuEXti_deBjSbB36gvpBOE67-U9h1wgD2VR_" "MDx8JaQHGct04gVZdKC7m4uqu5lI8u0jqXGG4UbRwfUMZ0UCjxJfyUbg6KUR7iyiqoH5szZv" "31rJISnM4RQvH-lQFrE6BuXpvB09Hve4T3q5mtq7E9pd5rXz_" "vlqL5ib5tkdBEg2cbydDZHeCx5uA9qcg3hGidrU1fLgreFKu3dSvzu4qFZL3-" "0Pnt4XMqwslx2vBbFQB7_K8Dnz10F1TA5njOvwFRWNjKM1I0cRZ5N3O1CnGv1wyAz-" "FIcKdk5_7Q", *result); } }
Result<RefreshToken> ParseRefreshToken(std::string_view source) {
  auto credentials = internal::ParseJson(source);
  if (credentials.is_discarded()) {
    return absl::UnauthenticatedError(
        tensorstore::StrCat("Invalid RefreshToken: ", source));
  }
  return ParseRefreshTokenImpl(credentials);
}
TEST(OAuthUtilTest, ParseRefreshToken_Invalid) { EXPECT_FALSE(ParseRefreshToken("{ }").ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "", "client_secret": "", "refresh_token": "" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "", "client_secret": "abc", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": "" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": 456 })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_secret": "abc", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "refresh_token": "456" })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", })") .ok()); EXPECT_FALSE(ParseRefreshToken(R"json({ "error": "invalid_grant", "error_description": "reauth related error (invalid_rapt)", "error_uri": "https: "error_subtype": "invalid_rapt" })json") .ok()); } TEST(OAuthUtilTest, ParseRefreshToken) { auto result = ParseRefreshToken(R"({ "client_id" : "123", "client_secret": "abc", "refresh_token": "456" })"); ASSERT_TRUE(result.ok()) << result.status(); EXPECT_EQ("123", result.value().client_id); EXPECT_EQ("abc", result.value().client_secret); EXPECT_EQ("456", result.value().refresh_token); }
#include "tensorflow/js/ops/ts_op_gen.h" #include <memory> #include <unordered_map> #include <vector> #include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { static bool IsListAttr(const OpDef_ArgDef& arg) { return !arg.type_list_attr().empty() || !arg.number_attr().empty(); } struct ArgDefs { ArgDefs(const OpDef::ArgDef& op_def_arg, const ApiDef::Arg& api_def_arg) : op_def_arg(op_def_arg), api_def_arg(api_def_arg) {} const OpDef::ArgDef& op_def_arg; const ApiDef::Arg& api_def_arg; }; struct OpAttrs { OpAttrs(const OpDef::AttrDef& op_def_attr, const ApiDef::Attr& api_def_attr) : op_def_attr(op_def_attr), api_def_attr(api_def_attr) {} const OpDef::AttrDef& op_def_attr; const ApiDef::Attr& api_def_attr; }; class GenTypeScriptOp { public: GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def); ~GenTypeScriptOp(); string Code(); private: void ProcessArgs(); void ProcessAttrs(); void AddAttrForArg(const string& attr, int arg_index); string InputForAttr(const OpDef::AttrDef& op_def_attr); void AddMethodSignature(); void AddOpAttrs(); void AddMethodReturnAndClose(); const OpDef& op_def_; const ApiDef& api_def_; string result_; std::vector<ArgDefs> input_op_args_; std::vector<OpAttrs> op_attrs_; typedef std::unordered_map<string, std::vector<int>> AttrArgIdxMap; AttrArgIdxMap attr_arg_idx_map_; int num_outputs_; }; GenTypeScriptOp::GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def) : op_def_(op_def), api_def_(api_def), num_outputs_(0) {} GenTypeScriptOp::~GenTypeScriptOp() = default; string GenTypeScriptOp::Code() { ProcessArgs(); ProcessAttrs(); AddMethodSignature(); AddOpAttrs(); AddMethodReturnAndClose(); strings::StrAppend(&result_, "\n"); return result_; } void GenTypeScriptOp::ProcessArgs() { for (int i = 0; i < api_def_.arg_order_size(); i++) { auto op_def_arg = FindInputArg(api_def_.arg_order(i), op_def_); if (op_def_arg == nullptr) { LOG(WARNING) << "Could not find OpDef::ArgDef for " << api_def_.arg_order(i); continue; } auto api_def_arg = FindInputArg(api_def_.arg_order(i), api_def_); if (api_def_arg == nullptr) { LOG(WARNING) << "Could not find ApiDef::Arg for " << api_def_.arg_order(i); continue; } if (!op_def_arg->type_attr().empty()) { AddAttrForArg(op_def_arg->type_attr(), i); } else if (!op_def_arg->type_list_attr().empty()) { AddAttrForArg(op_def_arg->type_list_attr(), i); } if (!op_def_arg->number_attr().empty()) { AddAttrForArg(op_def_arg->number_attr(), i); } input_op_args_.push_back(ArgDefs(*op_def_arg, *api_def_arg)); } num_outputs_ = api_def_.out_arg_size(); } void GenTypeScriptOp::ProcessAttrs() { for (int i = 0; i < op_def_.attr_size(); i++) { op_attrs_.push_back(OpAttrs(op_def_.attr(i), api_def_.attr(i))); } } void GenTypeScriptOp::AddAttrForArg(const string& attr, int arg_index) { auto iter = attr_arg_idx_map_.find(attr); if (iter == attr_arg_idx_map_.end()) { attr_arg_idx_map_.insert(AttrArgIdxMap::value_type(attr, {arg_index})); } else { iter->second.push_back(arg_index); } } string GenTypeScriptOp::InputForAttr(const OpDef::AttrDef& op_def_attr) { string inputs; auto arg_list = attr_arg_idx_map_.find(op_def_attr.name()); if (arg_list != attr_arg_idx_map_.end()) { for (auto iter = arg_list->second.begin(); iter != arg_list->second.end(); ++iter) { strings::StrAppend(&inputs, input_op_args_[*iter].op_def_arg.name()); } } return inputs; } 
void GenTypeScriptOp::AddMethodSignature() { strings::StrAppend(&result_, "export function ", api_def_.endpoint(0).name(), "("); bool is_first = true; for (auto& in_arg : input_op_args_) { if (is_first) { is_first = false; } else { strings::StrAppend(&result_, ", "); } auto op_def_arg = in_arg.op_def_arg; strings::StrAppend(&result_, op_def_arg.name(), ": "); if (IsListAttr(op_def_arg)) { strings::StrAppend(&result_, "tfc.Tensor[]"); } else { strings::StrAppend(&result_, "tfc.Tensor"); } } if (num_outputs_ == 1) { strings::StrAppend(&result_, "): tfc.Tensor {\n"); } else { strings::StrAppend(&result_, "): tfc.Tensor[] {\n"); } } void GenTypeScriptOp::AddOpAttrs() { strings::StrAppend(&result_, " const opAttrs = [\n"); bool is_first = true; for (auto& attr : op_attrs_) { if (is_first) { is_first = false; } else { strings::StrAppend(&result_, ",\n"); } strings::StrAppend(&result_, " "); if (attr.op_def_attr.type() == "type") { strings::StrAppend(&result_, "createTensorsTypeOpAttr('", attr.op_def_attr.name(), "', ", InputForAttr(attr.op_def_attr), ")"); } else if (attr.op_def_attr.type() == "int") { strings::StrAppend(&result_, "{name: '", attr.op_def_attr.name(), "', "); strings::StrAppend(&result_, "type: nodeBackend().binding.TF_ATTR_INT, "); strings::StrAppend(&result_, "value: ", InputForAttr(attr.op_def_attr), ".length}"); } } strings::StrAppend(&result_, "\n ];\n"); } void GenTypeScriptOp::AddMethodReturnAndClose() { strings::StrAppend(&result_, " return null;\n}\n"); } void WriteTSOp(const OpDef& op_def, const ApiDef& api_def, WritableFile* ts) { GenTypeScriptOp ts_op(op_def, api_def); TF_CHECK_OK(ts->Append(GenTypeScriptOp(op_def, api_def).Code())); } void StartFile(WritableFile* ts_file) { const string header = R"header( import * as tfc from '@tensorflow/tfjs-core'; import {createTensorsTypeOpAttr, nodeBackend} from './op_utils'; )header"; TF_CHECK_OK(ts_file->Append(header)); } } void WriteTSOps(const OpList& ops, const ApiDefMap& api_def_map, const string& ts_filename) { Env* env = Env::Default(); std::unique_ptr<WritableFile> ts_file = nullptr; TF_CHECK_OK(env->NewWritableFile(ts_filename, &ts_file)); StartFile(ts_file.get()); for (const auto& op_def : ops.op()) { if (op_def.has_deprecation() && op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) { continue; } const auto* api_def = api_def_map.GetApiDef(op_def.name()); if (api_def->visibility() == ApiDef::VISIBLE) { WriteTSOp(op_def, *api_def, ts_file.get()); } } TF_CHECK_OK(ts_file->Close()); } }
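End to end, the generator above is driven as in the following sketch (the unit tests below do the same through their GenerateTsOpFileText helper); the op-def text and output path are placeholders:

// Sketch: parse OpDefs from text, build the ApiDefMap, and emit the .ts file.
void GenerateForOps(const string& op_defs_text, const string& ts_path) {
  OpList op_defs;
  protobuf::TextFormat::ParseFromString(op_defs_text, &op_defs);
  ApiDefMap api_def_map(op_defs);  // only VISIBLE, non-deprecated ops emitted
  WriteTSOps(op_defs, api_def_map, ts_path);
}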
#include "tensorflow/js/ops/ts_op_gen.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { void ExpectContainsStr(StringPiece s, StringPiece expected) { EXPECT_TRUE(absl::StrContains(s, expected)) << "'" << s << "' does not contain '" << expected << "'"; } void ExpectDoesNotContainStr(StringPiece s, StringPiece expected) { EXPECT_FALSE(absl::StrContains(s, expected)) << "'" << s << "' does not contain '" << expected << "'"; } constexpr char kBaseOpDef[] = R"( op { name: "Foo" input_arg { name: "images" type_attr: "T" number_attr: "N" description: "Images to process." } input_arg { name: "dim" description: "Description for dim." type: DT_FLOAT } output_arg { name: "output" description: "Description for output." type: DT_FLOAT } attr { name: "T" type: "type" description: "Type for images" allowed_values { list { type: DT_UINT8 type: DT_INT8 } } default_value { i: 1 } } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Summary for op Foo." description: "Description for op Foo." } )"; void GenerateTsOpFileText(const string& op_def_str, const string& api_def_str, string* ts_file_text) { Env* env = Env::Default(); OpList op_defs; protobuf::TextFormat::ParseFromString( op_def_str.empty() ? kBaseOpDef : op_def_str, &op_defs); ApiDefMap api_def_map(op_defs); if (!api_def_str.empty()) { TF_ASSERT_OK(api_def_map.LoadApiDef(api_def_str)); } const string& tmpdir = testing::TmpDir(); const auto ts_file_path = io::JoinPath(tmpdir, "test.ts"); WriteTSOps(op_defs, api_def_map, ts_file_path); TF_ASSERT_OK(ReadFileToString(env, ts_file_path, ts_file_text)); } TEST(TsOpGenTest, TestImports) { string ts_file_text; GenerateTsOpFileText("", "", &ts_file_text); const string expected = R"( import * as tfc from '@tensorflow/tfjs-core'; import {createTensorsTypeOpAttr, nodeBackend} from './op_utils'; )"; ExpectContainsStr(ts_file_text, expected); } TEST(TsOpGenTest, InputSingleAndList) { const string api_def = R"pb( op { graph_op_name: "Foo" arg_order: "dim" arg_order: "images" } )pb"; string ts_file_text; GenerateTsOpFileText("", api_def, &ts_file_text); const string expected = R"( export function Foo(dim: tfc.Tensor, images: tfc.Tensor[]): tfc.Tensor { )"; ExpectContainsStr(ts_file_text, expected); } TEST(TsOpGenTest, TestVisibility) { const string api_def = R"( op { graph_op_name: "Foo" visibility: HIDDEN } )"; string ts_file_text; GenerateTsOpFileText("", api_def, &ts_file_text); const string expected = R"( export function Foo(images: tfc.Tensor[], dim: tfc.Tensor): tfc.Tensor { )"; ExpectDoesNotContainStr(ts_file_text, expected); } TEST(TsOpGenTest, SkipDeprecated) { const string op_def = R"( op { name: "DeprecatedFoo" input_arg { name: "input" type_attr: "T" description: "Description for input." } output_arg { name: "output" description: "Description for output." type: DT_FLOAT } attr { name: "T" type: "type" description: "Type for input" allowed_values { list { type: DT_FLOAT } } } deprecation { explanation: "Deprecated." 
} } )"; string ts_file_text; GenerateTsOpFileText(op_def, "", &ts_file_text); ExpectDoesNotContainStr(ts_file_text, "DeprecatedFoo"); } TEST(TsOpGenTest, MultiOutput) { const string op_def = R"( op { name: "MultiOutputFoo" input_arg { name: "input" description: "Description for input." type_attr: "T" } output_arg { name: "output1" description: "Description for output 1." type: DT_FLOAT } output_arg { name: "output2" description: "Description for output 2." type: DT_FLOAT } attr { name: "T" type: "type" description: "Type for input" allowed_values { list { type: DT_FLOAT } } } summary: "Summary for op MultiOutputFoo." description: "Description for op MultiOutputFoo." } )"; string ts_file_text; GenerateTsOpFileText(op_def, "", &ts_file_text); const string expected = R"( export function MultiOutputFoo(input: tfc.Tensor): tfc.Tensor[] { )"; ExpectContainsStr(ts_file_text, expected); } TEST(TsOpGenTest, OpAttrs) { string ts_file_text; GenerateTsOpFileText("", "", &ts_file_text); const string expectedFooAttrs = R"( const opAttrs = [ createTensorsTypeOpAttr('T', images), {name: 'N', type: nodeBackend().binding.TF_ATTR_INT, value: images.length} ]; )"; ExpectContainsStr(ts_file_text, expectedFooAttrs); } } }
string GenTypeScriptOp::InputForAttr(const OpDef::AttrDef& op_def_attr) {
  string inputs;
  auto arg_list = attr_arg_idx_map_.find(op_def_attr.name());
  if (arg_list != attr_arg_idx_map_.end()) {
    // Append the name of every input arg that references this attr. Note
    // that multiple names would be concatenated with no separator.
    for (auto iter = arg_list->second.begin(); iter != arg_list->second.end();
         ++iter) {
      strings::StrAppend(&inputs, input_op_args_[*iter].op_def_arg.name());
    }
  }
  return inputs;
}
TEST(TsOpGenTest, OpAttrs) { string ts_file_text; GenerateTsOpFileText("", "", &ts_file_text); const string expectedFooAttrs = R"( const opAttrs = [ createTensorsTypeOpAttr('T', images), {name: 'N', type: nodeBackend().binding.TF_ATTR_INT, value: images.length} ]; )"; ExpectContainsStr(ts_file_text, expectedFooAttrs); }
#include "tensorflow/lite/testing/tf_driver.h" #include <fstream> #include <iostream> #include <string> #include "absl/log/check.h" #include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/tstring.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/lite/string_type.h" #include "tensorflow/lite/string_util.h" #include "tensorflow/lite/testing/join.h" #include "tensorflow/lite/testing/split.h" namespace tflite { namespace testing { namespace { tensorflow::Tensor CreateTensor(const tensorflow::DataType type, const std::vector<int64_t>& dim) { tensorflow::TensorShape shape{absl::Span<const int64_t>{ reinterpret_cast<const int64_t*>(dim.data()), dim.size()}}; return {type, shape}; } template <typename T> int FillTensorWithData(tensorflow::Tensor* tensor, const string& values_as_string) { const auto& values = testing::Split<T>(values_as_string, ","); if (values.size() == tensor->NumElements()) { auto data = tensor->flat<T>(); for (int i = 0; i < values.size(); i++) { data(i) = values[i]; } } return values.size(); } int FillTensorWithTfLiteHexString(tensorflow::Tensor* tensor, const string& values_as_string) { string s = absl::HexStringToBytes(values_as_string); int num_strings = values_as_string.empty() ? 0 : GetStringCount(s.data()); if (num_strings == tensor->NumElements()) { auto data = tensor->flat<tensorflow::tstring>(); for (size_t i = 0; i < num_strings; ++i) { auto ref = GetString(s.data(), i); data(i).assign(ref.str, ref.len); } } return num_strings; } template <typename T> void FillTensorWithZeros(tensorflow::Tensor* tensor) { auto data = tensor->flat<T>(); for (int i = 0; i < tensor->NumElements(); i++) { data(i) = 0; } } template <typename T> string TensorDataToCsvString(const tensorflow::Tensor& tensor) { const auto& data = tensor.flat<T>(); return Join(data.data(), data.size(), ","); } string TensorDataToTfLiteHexString(const tensorflow::Tensor& tensor) { DynamicBuffer dynamic_buffer; auto data = tensor.flat<tensorflow::tstring>(); for (int i = 0; i < tensor.NumElements(); ++i) { dynamic_buffer.AddString(data(i).data(), data(i).size()); } char* char_buffer = nullptr; size_t size = dynamic_buffer.WriteToBuffer(&char_buffer); string s = absl::BytesToHexString({char_buffer, size}); free(char_buffer); return s; } } TfDriver::TfDriver(const std::vector<string>& input_layer, const std::vector<string>& input_layer_type, const std::vector<string>& input_layer_shape, const std::vector<string>& output_layer) : input_names_(input_layer), output_names_(output_layer) { CHECK_EQ(input_layer.size(), input_layer_type.size()); CHECK_EQ(input_layer.size(), input_layer_shape.size()); input_ids_.resize(input_layer.size()); input_tensors_.reserve(input_layer.size()); input_types_.resize(input_layer.size()); input_shapes_.resize(input_layer.size()); for (int i = 0; i < input_layer.size(); i++) { input_ids_[i] = i; input_tensors_[input_layer[i]] = {}; CHECK(DataTypeFromString(input_layer_type[i], &input_types_[i])); input_shapes_[i] = Split<int64_t>(input_layer_shape[i], ","); input_name_to_id_[input_layer[i]] = i; } output_ids_.resize(output_layer.size()); output_tensors_.reserve(output_layer.size()); for (int i = 0; i < 
output_layer.size(); i++) { output_ids_[i] = i; output_name_to_id_[output_layer[i]] = i; } } void TfDriver::LoadModel(const string& bin_file_path) { if (!IsValid()) return; std::ifstream model(bin_file_path); if (model.fail()) { Invalidate("Failed to find the model " + bin_file_path); return; } tensorflow::GraphDef graphdef; if (!graphdef.ParseFromIstream(&model)) { Invalidate("Failed to parse tensorflow graphdef"); return; } tensorflow::SessionOptions options; session_.reset(tensorflow::NewSession(options)); auto status = session_->Create(graphdef); if (!status.ok()) { Invalidate(absl::StrCat("Failed to create session. ", status.message())); } } void TfDriver::ReshapeTensor(const string& name, const string& csv_values) { if (!IsValid()) return; int id = input_name_to_id_[name]; input_shapes_[id] = Split<int64_t>(csv_values, ","); input_tensors_[input_names_[id]] = CreateTensor(input_types_[id], input_shapes_[id]); ResetTensor(name); } void TfDriver::ResetTensor(const std::string& name) { if (!IsValid()) return; int id = input_name_to_id_[name]; auto tensor = input_tensors_[input_names_[id]]; switch (input_types_[id]) { case tensorflow::DT_FLOAT: { FillTensorWithZeros<float>(&tensor); break; } case tensorflow::DT_INT32: { FillTensorWithZeros<int32_t>(&tensor); break; } default: Invalidate(absl::StrCat("Unsupported tensor type ", input_types_[id], tensorflow::DataType_Name(input_types_[id]), " in ResetInput")); return; } } string TfDriver::ReadOutput(const string& name) { if (!IsValid()) return ""; return ReadOutput(output_tensors_[output_name_to_id_[name]]); } void TfDriver::Invoke(const std::vector<std::pair<string, string>>& inputs) { if (!IsValid()) return; for (const auto& input : inputs) { auto id = input_name_to_id_[input.first]; auto tensor = CreateTensor(input_types_[id], input_shapes_[id]); SetInput(input.second, &tensor); input_tensors_[input_names_[id]] = tensor; } auto status = session_->Run({input_tensors_.begin(), input_tensors_.end()}, output_names_, {}, &output_tensors_); if (!status.ok()) { Invalidate( absl::StrCat("TensorFlow failed to run graph:", status.message())); } } void TfDriver::SetInput(const string& values_as_string, tensorflow::Tensor* tensor) { int num_values_available = 0; switch (tensor->dtype()) { case tensorflow::DT_FLOAT: num_values_available = FillTensorWithData<float>(tensor, values_as_string); break; case tensorflow::DT_INT32: num_values_available = FillTensorWithData<int32_t>(tensor, values_as_string); break; case tensorflow::DT_UINT32: num_values_available = FillTensorWithData<uint32_t>(tensor, values_as_string); break; case tensorflow::DT_UINT8: num_values_available = FillTensorWithData<uint8_t>(tensor, values_as_string); break; case tensorflow::DT_STRING: num_values_available = FillTensorWithTfLiteHexString(tensor, values_as_string); break; default: Invalidate(absl::StrCat("Unsupported tensor type ", tensorflow::DataType_Name(tensor->dtype()), " in SetInput")); return; } if (tensor->NumElements() != num_values_available) { Invalidate(absl::StrCat("Needed ", tensor->NumElements(), " values for input tensor, but was given ", num_values_available, " instead.")); } } string TfDriver::ReadOutput(const tensorflow::Tensor& tensor) { switch (tensor.dtype()) { case tensorflow::DT_FLOAT: return TensorDataToCsvString<float>(tensor); case tensorflow::DT_INT32: return TensorDataToCsvString<int32_t>(tensor); case tensorflow::DT_UINT32: return TensorDataToCsvString<uint32_t>(tensor); case tensorflow::DT_INT64: return TensorDataToCsvString<int64_t>(tensor); case 
tensorflow::DT_UINT8: return TensorDataToCsvString<uint8_t>(tensor); case tensorflow::DT_STRING: return TensorDataToTfLiteHexString(tensor); case tensorflow::DT_BOOL: return TensorDataToCsvString<bool>(tensor); default: Invalidate(absl::StrCat("Unsupported tensor type ", tensorflow::DataType_Name(tensor.dtype()), " in ReadOutput")); return ""; } } } }
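The driver moves all tensor data through flat strings: CSV for numeric types, hex-encoded TFLite string buffers for DT_STRING. A minimal round-trip sketch using the same subclass trick the test below uses to reach SetInput/ReadOutput:

class RoundTripDriver : public TfDriver {
 public:
  RoundTripDriver() : TfDriver({}, {}, {}, {}) {}
  // Parse a CSV string into a float tensor, then serialize it back.
  string RoundTrip(const std::vector<int64_t>& shape, const string& csv) {
    tensorflow::Tensor t{tensorflow::DT_FLOAT,
                         tensorflow::TensorShape{absl::Span<const int64_t>{
                             shape.data(), shape.size()}}};
    SetInput(csv, &t);     // element count is validated against the shape
    return ReadOutput(t);  // CSV in row-major order
  }
};

As the tests show, floats come back with full printed precision (e.g. "0.1" round-trips as "0.100000001").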
#include "tensorflow/lite/testing/tf_driver.h" #include <algorithm> #include <string> #include <gtest/gtest.h> #include "absl/strings/escaping.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/string_type.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace testing { namespace { class TestDriver : public TfDriver { public: TestDriver() : TfDriver({}, {}, {}, {}) {} string WriteAndReadBack(tensorflow::DataType type, const std::vector<int64_t>& shape, const string& values) { tensorflow::Tensor t = { type, tensorflow::TensorShape{absl::Span<const int64_t>{ reinterpret_cast<const int64_t*>(shape.data()), shape.size()}}}; SetInput(values, &t); return ReadOutput(t); } }; TEST(TfDriverTest, ReadingAndWritingValues) { TestDriver driver; ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_FLOAT, {1, 2, 2}, "0.10,0.20,0.30,0.40"), "0.100000001,0.200000003,0.300000012,0.400000006"); ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_INT32, {1, 2, 2}, "10,40,100,-100"), "10,40,100,-100"); ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_UINT8, {1, 2, 2}, "48,49,121, 122"), "0,1,y,z"); } TEST(TfDriverTest, ReadingAndWritingValuesStrings) { TestDriver driver; auto set_buffer = [](const std::vector<string>& values, string* buffer) { DynamicBuffer dynamic_buffer; for (const string& s : values) { dynamic_buffer.AddString(s.data(), s.size()); } char* char_b = nullptr; int size = dynamic_buffer.WriteToBuffer(&char_b); *buffer = absl::BytesToHexString(absl::string_view(char_b, size)); free(char_b); }; string buffer; set_buffer({"", "", "", ""}, &buffer); ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer), buffer); ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, ""), buffer); set_buffer({"AB", "ABC", "X", "YZ"}, &buffer); ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer), buffer); } TEST(TfDriverTest, SimpleTest) { std::unique_ptr<TfDriver> runner( new TfDriver({"a", "b", "c", "d"}, {"float", "float", "float", "float"}, {"1,8,8,3", "1,8,8,3", "1,8,8,3", "1,8,8,3"}, {"x", "y"})); runner->LoadModel("tensorflow/lite/testdata/multi_add.pb"); EXPECT_TRUE(runner->IsValid()) << runner->GetErrorMessage(); for (const auto& i : {"a", "b", "c", "d"}) { runner->ReshapeTensor(i, "1,2,2,1"); } ASSERT_TRUE(runner->IsValid()); runner->ResetTensor("c"); runner->Invoke({{"a", "0.1,0.2,0.3,0.4"}, {"b", "0.001,0.002,0.003,0.004"}, {"d", "0.01,0.02,0.03,0.04"}}); ASSERT_EQ(runner->ReadOutput("x"), "0.101000004,0.202000007,0.303000003,0.404000014"); ASSERT_EQ(runner->ReadOutput("y"), "0.0109999999,0.0219999999,0.0329999998,0.0439999998"); } } } }
void TfDriver::LoadModel(const string& bin_file_path) {
  if (!IsValid()) return;
  std::ifstream model(bin_file_path);
  if (model.fail()) {
    Invalidate("Failed to find the model " + bin_file_path);
    return;
  }
  tensorflow::GraphDef graphdef;
  if (!graphdef.ParseFromIstream(&model)) {
    Invalidate("Failed to parse tensorflow graphdef");
    return;
  }
  tensorflow::SessionOptions options;
  session_.reset(tensorflow::NewSession(options));
  auto status = session_->Create(graphdef);
  if (!status.ok()) {
    Invalidate(absl::StrCat("Failed to create session. ", status.message()));
  }
}
TEST(TfDriverTest, SimpleTest) {
  std::unique_ptr<TfDriver> runner(new TfDriver(
      {"a", "b", "c", "d"}, {"float", "float", "float", "float"},
      {"1,8,8,3", "1,8,8,3", "1,8,8,3", "1,8,8,3"}, {"x", "y"}));
  runner->LoadModel("tensorflow/lite/testdata/multi_add.pb");
  EXPECT_TRUE(runner->IsValid()) << runner->GetErrorMessage();
  for (const auto& i : {"a", "b", "c", "d"}) {
    runner->ReshapeTensor(i, "1,2,2,1");
  }
  ASSERT_TRUE(runner->IsValid());
  runner->ResetTensor("c");
  runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
                  {"b", "0.001,0.002,0.003,0.004"},
                  {"d", "0.01,0.02,0.03,0.04"}});
  ASSERT_EQ(runner->ReadOutput("x"),
            "0.101000004,0.202000007,0.303000003,0.404000014");
  ASSERT_EQ(runner->ReadOutput("y"),
            "0.0109999999,0.0219999999,0.0329999998,0.0439999998");
}
#include "tensorflow/core/kernels/data/rewrite_dataset_op.h" #if !defined(IS_MOBILE_PLATFORM) #include <map> #include <string> #include "tensorflow/core/data/rewrite_utils.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" namespace tensorflow { namespace data { constexpr const char* const RewriteDatasetOp::kDatasetType; constexpr const char* const RewriteDatasetOp::kInputDataset; constexpr const char* const RewriteDatasetOp::kRewriteName; constexpr const char* const RewriteDatasetOp::kOutputTypes; constexpr const char* const RewriteDatasetOp::kOutputShapes; RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx) {} void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { tstring rewrite_name; OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name)); auto config_factory = [rewrite_name]() { RewriterConfig rewriter_config; rewriter_config.add_optimizers(std::string(rewrite_name)); rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE); rewriter_config.set_fail_on_optimizer_errors(true); return rewriter_config; }; core::RefCountPtr<DatasetBase> rewritten; OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory), false, &rewritten)); *output = rewritten.release(); } namespace { REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU), RewriteDatasetOp); } } } #else namespace tensorflow { namespace data { RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx) {} void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { input->Ref(); *output = input; } namespace { REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU), RewriteDatasetOp); } } } #endif
#include "tensorflow/core/kernels/data/rewrite_dataset_op.h" #include <utility> #include "tensorflow/core/data/dataset_test_base.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "rewrite_dataset"; constexpr char kReplicateOnSplit[] = "replicate_on_split"; class RewriteDatasetParams : public DatasetParams { public: template <typename T> RewriteDatasetParams(T input_dataset_params, string rewrite_name, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), rewrite_name_(rewrite_name) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { return {CreateTensor<tstring>(TensorShape({}), {rewrite_name_})}; } Status GetInputNames(std::vector<string>* input_names) const override { *input_names = {RewriteDatasetOp::kInputDataset, RewriteDatasetOp::kRewriteName}; return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { attr_vector->emplace_back("output_types", output_dtypes_); attr_vector->emplace_back("output_shapes", output_shapes_); return absl::OkStatus(); } string dataset_type() const override { return RewriteDatasetOp::kDatasetType; } private: string rewrite_name_; }; class RewriteDatasetOpTest : public DatasetOpsTestBase {}; TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) { auto range_dataset_params = RangeDatasetParams(0, 5, 1); auto rewrite_dataset_params = RewriteDatasetParams(std::move(range_dataset_params), kReplicateOnSplit, {DT_INT64}, {PartialTensorShape({})}, kNodeName); std::vector<Tensor> expected_outputs = CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}}); TF_ASSERT_OK(Initialize(rewrite_dataset_params)); TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true)); } } } }
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { tstring rewrite_name; OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name)); auto config_factory = [rewrite_name]() { RewriterConfig rewriter_config; rewriter_config.add_optimizers(std::string(rewrite_name)); rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE); rewriter_config.set_fail_on_optimizer_errors(true); return rewriter_config; }; core::RefCountPtr<DatasetBase> rewritten; OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory), false, &rewritten)); *output = rewritten.release(); } void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { input->Ref(); *output = input; }
TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) { auto range_dataset_params = RangeDatasetParams(0, 5, 1); auto rewrite_dataset_params = RewriteDatasetParams(std::move(range_dataset_params), kReplicateOnSplit, {DT_INT64}, {PartialTensorShape({})}, kNodeName); std::vector<Tensor> expected_outputs = CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}}); TF_ASSERT_OK(Initialize(rewrite_dataset_params)); TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true)); }
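The config_factory lambda above is the whole trick behind RewriteDatasetOp: it asks Grappler to run exactly one named optimizer over the input dataset's graph. A minimal standalone sketch of that config (the helper name MakeSingleRewriteConfig is hypothetical, introduced only for illustration):

#include <string>
#include "tensorflow/core/protobuf/rewriter_config.pb.h"

// Sketch only: mirrors the config_factory lambda in RewriteDatasetOp::MakeDataset.
tensorflow::RewriterConfig MakeSingleRewriteConfig(const std::string& rewrite_name) {
  tensorflow::RewriterConfig config;
  // Run the one named Grappler pass, e.g. "replicate_on_split".
  config.add_optimizers(rewrite_name);
  // A single meta-optimizer iteration, and surface rewrite failures
  // instead of silently skipping them.
  config.set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
  config.set_fail_on_optimizer_errors(true);
  return config;
}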
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <memory> #include <optional> #include <sstream> #include <string> #include <utility> #include <vector> #include "absl/container/fixed_array.h" #include "absl/container/flat_hash_map.h" #include "absl/functional/bind_front.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" #include "absl/types/optional.h" #include "tensorflow/core/common_runtime/cost_constants.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include "tensorflow/core/common_runtime/cost_util.h" #include "tensorflow/core/common_runtime/request_cost.h" #include "tensorflow/core/common_runtime/request_cost_accessor.h" #include "tensorflow/core/common_runtime/request_cost_accessor_registry.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/ops_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h" #include "tensorflow/core/kernels/batching_util/batch_stats.h" #include "tensorflow/core/kernels/batching_util/concat_split_util.h" #include "tensorflow/core/kernels/batching_util/input_split_metadata.h" #include "tensorflow/core/kernels/batching_util/threadsafe_status.h" #include "tensorflow/core/kernels/batching_util/warmup.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/lib/monitoring/gauge.h" #include "tensorflow/core/lib/monitoring/percentile_sampler.h" #include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/lib/monitoring/types.h" #include "tensorflow/core/platform/context.h" #include "tensorflow/core/platform/env_time.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tensorflow/core/util/incremental_barrier.h" #include "tsl/platform/criticality.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace serving { namespace { void RecordPaddingSize(int32_t padding_size, const string& model_name, int32_t execution_batch_size, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<3>::New( {"/tensorflow/serving/batching/padding_size", "Tracks the padding size distribution on batches by model_name (if " "available).", "model_name", "execution_batch_size", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name) ->Add(static_cast<double>(padding_size)); } void RecordPaddingSizeV2(int32_t padding_size, const string& model_name, int32_t execution_batch_size, const string& op_name) { 
std::vector<double> bucket_limits; bucket_limits.push_back(-2.0 / 3.0); double bound = 2.0 / 3.0; double growth_factor = 2; for (int i = 0; i < 16; i++) { bucket_limits.push_back(bound); bound *= growth_factor; } static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/padding_size_v2", "Tracks the padding size distribution on batches by model_name (if " "available).", "model_name", "execution_batch_size", "op_name"}, monitoring::Buckets::Explicit(bucket_limits)); cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name) ->Add(static_cast<double>(padding_size)); } void RecordInputBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New( {"/tensorflow/serving/batching/input_batch_size", "Tracks the batch size distribution on the inputs by model_name (if " "available).", "model_name", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordInputBatchSizeV2(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::Sampler<2>::New( {"/tensorflow/serving/batching/input_batch_size_v2", "Tracks the batch size distribution on the inputs by model_name (if " "available).", "model_name", "op_name"}, monitoring::Buckets::Exponential(2.0 / 3.0, 2, 15)); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::Sampler<2>::New( {"/tensorflow/serving/batching/batch_size", "Tracks the batch size distribution on the batch result by model_name " "(if available).", "model_name", "op_name"}, monitoring::Buckets::Exponential(1, 1.5, 20)); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordProcessedBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New( {"/tensorflow/serving/batching/processed_batch_size", "Tracks the batch size distribution on processing by model_name (if " "available).", "model_name", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordProcessedBatchSizeV2(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = monitoring::Counter<3>::New( "/tensorflow/serving/batching/processed_batch_size_v2", "Tracks the batch size on processing by model_name and op name (if " "available).", "model_name", "op_name", "batch_size"); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->IncrementBy(1); } void RecordBatchDelayUs(int64_t batch_delay_us, const string& model_name, const string& op_name, int32_t batch_size) { static auto* cell = monitoring::PercentileSampler<3>::New( {"/tensorflow/serving/batching/batch_delay_us", "Tracks the batching delay (in microseconds) for inputs by model_name " "(if available).", "model_name", "op_name", "processed_batch_size"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, monitoring::UnitOfMeasure::kTime); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->Add(static_cast<double>(batch_delay_us)); } void RecordBatchDelayUsV2(int64_t batch_delay_us, const string& 
model_name, const string& op_name, int32_t batch_size) { static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/batch_delay_us_v2", "Tracks the batching delay (in microseconds) for inputs by model_name " "(if available).", "model_name", "op_name", "processed_batch_size"}, monitoring::Buckets::Exponential(1, 2, 27)); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->Add(static_cast<double>(batch_delay_us)); } void RecordBatchParamBatchTimeoutMicros(int64_t batch_timeout_micros, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/batch_timeout_micros", "Tracks how long a request can wait before being processed by a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(batch_timeout_micros); } void RecordBatchParamMaxBatchSize(int64_t max_batch_size, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/max_batch_size", "Tracks the maximum size of a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(max_batch_size); } void RecordBatchParamMaxEnqueuedBatches(int64_t max_enqueued_batches, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/max_enqueued_batches", "Tracks the maximum number of enqueued batches.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(max_enqueued_batches); } void RecordBatchParamAllowedBatchSizes(const string& allowed_batch_sizes, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<string, 2>::New( "/tensorflow/serving/batching/allowed_batch_sizes", "Tracks the sizes that are allowed to form a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(allowed_batch_sizes); } void RecordBatchCosts(const std::string& model_name, const int64_t processed_size, const absl::string_view cost_type, const absl::Duration total_cost) { static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/costs", "Tracks the batch costs (in microseconds) by model name and processed " "size.", "model_name", "processed_size", "cost_type"}, monitoring::Buckets::Exponential(1, 2, 27)); cell->GetCell(model_name, std::to_string(processed_size), std::string(cost_type)) ->Add(absl::ToDoubleMicroseconds(total_cost)); } const string& GetModelName(OpKernelContext* ctx) { static string* kModelNameUnset = new string("model_name_unset"); if (!ctx->session_metadata()) return *kModelNameUnset; if (ctx->session_metadata()->name().empty()) return *kModelNameUnset; return ctx->session_metadata()->name(); } int GetTotalTaskSize( const std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>& tasks) { int tasks_size = 0; for (const auto& task : tasks) { tasks_size += task->size(); } return tasks_size; } } std::unique_ptr<BatchResourceBase::BatchTask> BatchResourceBase::BatchTask::CreateSplitTask( int split_index, AsyncOpKernel::DoneCallback done_callback) { std::unique_ptr<BatchTask> task = CreateDerivedTask(); task->guid = this->guid; task->propagated_context = Context(ContextKind::kThread); task->inputs.reserve(this->inputs.size()); task->captured_inputs = this->captured_inputs; task->context = this->context; task->done_callback = done_callback; task->split_index = split_index; task->output = this->output; task->status = this->status; task->is_partial = true; 
task->start_time = this->start_time; task->request_cost = this->request_cost; task->forced_warmup_batch_size = this->forced_warmup_batch_size; return task; } using ::tensorflow::concat_split_util::Concat; using ::tensorflow::concat_split_util::Split; using TensorMatrix = std::vector<std::vector<Tensor>>; string GetTensorNamesAndShapesString(const OpKernelContext* context, const OpInputList& tensors) { std::stringstream out; int i = 0; for (const Tensor& tensor : tensors) { out << " - " << context->op_kernel().requested_input(i++) << " has shape " << tensor.shape().DebugString() << "\n"; } return out.str(); } Status BatchResourceBase::RegisterWarmupInputs( int64_t guid, OpKernelContext* context, const string& batcher_queue_name, const CreateBatchTaskFn& create_batch_task_fn, AsyncOpKernel::DoneCallback done) { auto shared_status = std::make_shared<ThreadSafeStatus>(); auto create_batch_task_fn_share_status = [&create_batch_task_fn, &shared_status]() { auto batch_task = create_batch_task_fn(); if (!batch_task.ok()) { return batch_task; } (*batch_task)->status = shared_status; return batch_task; }; auto warmup_counter = std::make_shared<absl::BlockingCounter>(allowed_batch_sizes_.size()); for (int i = 0; i < allowed_batch_sizes_.size(); ++i) { Status status = RegisterInput( guid, context, batcher_queue_name, create_batch_task_fn_share_status, [warmup_counter = warmup_counter.get()]() { warmup_counter->DecrementCount(); }, allowed_batch_sizes_[i]); if (!status.ok()) return status; } return RegisterInput( guid, context, batcher_queue_name, create_batch_task_fn_share_status, [warmup_counter, context, shared_status, done = std::move(done)]() { warmup_counter->Wait(); context->SetStatus(shared_status->status()); done(); }); } Status BatchResourceBase::RegisterInput( int64_t guid, OpKernelContext* context, const string& batcher_queue_name, const CreateBatchTaskFn& create_batch_task_fn, AsyncOpKernel::DoneCallback done_callback, int forced_warmup_batch_size) { TF_ASSIGN_OR_RETURN(std::unique_ptr<BatchTask> batch_components, create_batch_task_fn()); batch_components->start_time = EnvTime::NowNanos(); batch_components->guid = guid; batch_components->propagated_context = Context(ContextKind::kThread); OpInputList tensors; TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors)); batch_components->inputs.reserve(tensors.size()); for (const Tensor& tensor : tensors) { if (tensor.shape().dims() == 0) { return errors::InvalidArgument( "Batching input tensors must have at least one dimension.\nBelow are " "the input tensors: \n", GetTensorNamesAndShapesString(context, tensors)); } if (tensors.size() >= 2 && tensor.shape().dim_size(0) != tensors[0].shape().dim_size(0)) { return errors::InvalidArgument( "Batching input tensors supplied in a given op invocation must " "have equal 0th-dimension size.\nBelow are the input tensors: \n", GetTensorNamesAndShapesString(context, tensors)); } batch_components->inputs.push_back(tensor); } RecordInputBatchSize(tensors[0].shape().dim_size(0), GetModelName(context), context->op_kernel().name()); RecordInputBatchSizeV2(tensors[0].shape().dim_size(0), GetModelName(context), context->op_kernel().name()); if (batcher_) { RecordBatchParamBatchTimeoutMicros( batcher_queue_options_.batch_timeout_micros, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxBatchSize( batcher_queue_options_.max_execution_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxEnqueuedBatches( batcher_queue_options_.max_enqueued_batches, 
GetModelName(context), context->op_kernel().name()); } else if (adaptive_batcher_) { RecordBatchParamBatchTimeoutMicros( adaptive_batcher_queue_options_.batch_timeout_micros, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxBatchSize(adaptive_batcher_queue_options_.max_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxEnqueuedBatches( adaptive_batcher_queue_options_.max_enqueued_batches, GetModelName(context), context->op_kernel().name()); } else { return errors::Internal("No batcher defined."); } RecordBatchParamAllowedBatchSizes(allowed_batch_sizes_str_, GetModelName(context), context->op_kernel().name()); if (tensors[0].shape().dim_size(0) == 0) { for (int i = 0; i < context->num_outputs(); i++) { Tensor* empty_output; AllocatorAttributes cpu_alloc; cpu_alloc.set_on_host(true); TF_RETURN_IF_ERROR(context->allocate_output(i, TensorShape({0}), &empty_output, cpu_alloc)); } done_callback(); return absl::OkStatus(); } OpInputList captured_tensors; const auto captured_status = context->input_list("captured_tensors", &captured_tensors); if (captured_status.ok()) { batch_components->captured_inputs.reserve(captured_tensors.size()); for (const Tensor& captured_tensor : captured_tensors) { batch_components->captured_inputs.push_back(captured_tensor); } } batch_components->context = context; batch_components->split_index = 0; batch_components->output = std::make_shared<TensorMatrix>(); if (!batch_components->status) { batch_components->status = std::make_shared<ThreadSafeStatus>(); batch_components->done_callback = [done_callback = std::move(done_callback), shared_status = batch_components->status, context = context]() { context->SetStatus(shared_status->status()); done_callback(); }; } else { batch_components->done_callback = std::move(done_callback); } batch_components->forced_warmup_batch_size = forced_warmup_batch_size; std::unique_ptr<RequestCostAccessor> request_cost_accessor = CreateRequestCostAccessor(); if (request_cost_accessor) { batch_components->request_cost = request_cost_accessor->GetRequestCost(); } BatcherQueueT* batcher_queue; TF_RETURN_IF_ERROR( LookupOrCreateBatcherQueue(batcher_queue_name, &batcher_queue)); if (!session_metadata().name().empty()) { absl::MutexLock lock(&outstanding_batch_mu_); WarmupStateRegistry::Key key(session_metadata().name(), session_metadata().version()); if (GetGlobalWarmupStateRegistry().Lookup(key)) { outstanding_batch_mu_.Await({+[](int* num_outstanding_batched_items) { return *num_outstanding_batched_items == 0; }, &num_outstanding_batched_items_}); } num_outstanding_batched_items_ += batch_components->size(); } return batcher_queue->Schedule(&batch_components); } BatchResourceBase::BatcherT::QueueOptions BatchResourceBase::GetBatcherQueueOptions( int32_t num_batch_threads, int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, bool enable_large_batch_splitting, bool disable_padding) { return GetBatcherQueueOptions( num_batch_threads, max_batch_size, batch_timeout_micros, max_enqueued_batches, allowed_batch_sizes, enable_large_batch_splitting, disable_padding, 0, 0, 0, {}, MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize); } BatchResourceBase::BatcherT::QueueOptions BatchResourceBase::GetBatcherQueueOptions( int32_t num_batch_threads, int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, bool enable_large_batch_splitting, 
bool disable_padding, int32_t low_priority_max_batch_size, int32_t low_priority_batch_timeout_micros, int32_t low_priority_max_enqueued_batches, const std::vector<int32>& low_priority_allowed_batch_sizes, MixedPriorityBatchingPolicy mixed_priority_batching_policy) { BatcherT::QueueOptions batcher_queue_options; batcher_queue_options.input_batch_size_limit = max_batch_size; batcher_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.batch_timeout_micros = batch_timeout_micros; if (low_priority_max_batch_size > 0) { batcher_queue_options.enable_priority_queue = true; } batcher_queue_options.high_priority_queue_options.input_batch_size_limit = max_batch_size; batcher_queue_options.high_priority_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.high_priority_queue_options.batch_timeout_micros = batch_timeout_micros; batcher_queue_options.low_priority_queue_options.input_batch_size_limit = low_priority_max_batch_size; batcher_queue_options.low_priority_queue_options.max_enqueued_batches = low_priority_max_enqueued_batches; batcher_queue_options.low_priority_queue_options.batch_timeout_micros = low_priority_batch_timeout_micros; if (low_priority_allowed_batch_sizes.empty()) { batcher_queue_options.low_priority_queue_options.max_execution_batch_size = low_priority_max_batch_size; } else { batcher_queue_options.low_priority_queue_options.max_execution_batch_size = *low_priority_allowed_batch_sizes.rbegin(); } batcher_queue_options.low_priority_queue_options.allowed_batch_sizes = low_priority_allowed_batch_sizes; batcher_queue_options.mixed_priority_batching_policy = mixed_priority_batching_policy; batcher_queue_options.enable_large_batch_splitting = enable_large_batch_splitting; if (enable_large_batch_splitting) { batcher_queue_options.split_input_task_func = [](std::unique_ptr<BatchTask>* input_task, int open_batch_remaining_slot, int max_batch_size, std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status { return SplitInputTask(input_task, open_batch_remaining_slot, max_batch_size, output_tasks); }; if (allowed_batch_sizes.empty()) { batcher_queue_options.max_execution_batch_size = max_batch_size; batcher_queue_options.high_priority_queue_options .max_execution_batch_size = max_batch_size; } else { batcher_queue_options.max_execution_batch_size = *allowed_batch_sizes.rbegin(); batcher_queue_options.high_priority_queue_options .max_execution_batch_size = *allowed_batch_sizes.rbegin(); batcher_queue_options.allowed_batch_sizes = allowed_batch_sizes; } } batcher_queue_options.disable_padding = disable_padding; return batcher_queue_options; } BatchResourceBase::AdaptiveBatcherT::QueueOptions BatchResourceBase::GetAdaptiveBatcherQueueOptions( int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, bool enable_large_batch_splitting, const std::vector<int32>& allowed_batch_sizes, bool disable_padding) { AdaptiveBatcherT::QueueOptions batcher_queue_options; batcher_queue_options.max_input_task_size = std::make_optional(max_batch_size); batcher_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.batch_timeout_micros = batch_timeout_micros; if (allowed_batch_sizes.empty()) { batcher_queue_options.max_batch_size = max_batch_size; } else { batcher_queue_options.max_batch_size = *allowed_batch_sizes.rbegin(); } if (enable_large_batch_splitting) { batcher_queue_options.split_input_task_func = [](std::unique_ptr<BatchTask>* input_task, int open_batch_remaining_slot, int max_batch_size, 
std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status { return SplitInputTask(input_task, open_batch_remaining_slot, max_batch_size, output_tasks); }; } batcher_queue_options.disable_padding = disable_padding; return batcher_queue_options; } Status BatchResourceBase::ValidateBatch(const BatchT& batch) { for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) { const BatchResourceBase::BatchTask& task = batch.task(task_idx); if (task.inputs.size() != batch.task(0).inputs.size()) { return errors::InvalidArgument( "Batching inputs must have equal number of edges"); } } return absl::OkStatus(); } bool BatchResourceBase::IsLowPriorityBatch(const BatchT& batch) const { if (!batcher_queue_options_.enable_priority_queue) return false; if (batch.empty()) return false; return batch.task(0).criticality() == tsl::criticality::Criticality::kSheddablePlus || batch.task(0).criticality() == tsl::criticality::Criticality::kSheddable; } int BatchResourceBase::RoundToLowestAllowedBatchSize( int batch_size, bool is_low_priority_batch) const { const std::vector<int32>& allowed_batch_sizes = is_low_priority_batch ? batcher_queue_options_.low_priority_queue_options .allowed_batch_sizes : allowed_batch_sizes_; return GetNextAllowedBatchSize(batch_size, allowed_batch_sizes, batcher_queue_options_.disable_padding); } Status BatchResourceBase::ConcatInputTensors( const BatchT& batch, const std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks, OpKernelContext* context, std::vector<Tensor>* concatenated_tensors) const { if (batch.num_tasks() == 0) { return errors::InvalidArgument("Empty batch."); } int unbatched_tasks_size = GetTotalTaskSize(unbatched_tasks); const bool just_for_warmup = batch.task(0).forced_warmup_batch_size > 0; const int padded_batch_size = just_for_warmup ? batch.task(0).forced_warmup_batch_size : RoundToLowestAllowedBatchSize(batch.size() + unbatched_tasks_size, IsLowPriorityBatch(batch)); const int padding_amount = just_for_warmup ? 
padded_batch_size : padded_batch_size - batch.size() - unbatched_tasks_size; tsl::profiler::TraceMe trace_me( [padded_batch_size, padding_amount, disable_padding = batcher_queue_options_.disable_padding]() { return tsl::profiler::TraceMeEncode( "ConcatInputTensors", {{"batch_size_after_padding", padded_batch_size}, {"padding_amount", padding_amount}, {"disable_padding", disable_padding}}); }); RecordPaddingSize(padding_amount, GetModelName(context), padded_batch_size, context->op_kernel().name()); RecordPaddingSizeV2(padding_amount, GetModelName(context), padded_batch_size, context->op_kernel().name()); RecordProcessedBatchSize(padded_batch_size, GetModelName(context), context->op_kernel().name()); RecordProcessedBatchSizeV2(padded_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchSize(batch.size(), GetModelName(context), context->op_kernel().name()); const int num_inputs = batch.task(0).inputs.size(); concatenated_tensors->reserve(num_inputs); for (int i = 0; i < num_inputs; ++i) { std::vector<Tensor> to_concatenate; if (just_for_warmup) { to_concatenate.reserve(padding_amount); } else { to_concatenate.reserve(batch.num_tasks() + unbatched_tasks.size() + padding_amount); for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) { to_concatenate.push_back(batch.task(task_idx).inputs.at(i)); } for (int task_idx = 0; task_idx < unbatched_tasks.size(); ++task_idx) { to_concatenate.push_back(unbatched_tasks[task_idx]->inputs.at(i)); } } if (padding_amount != 0) { const Tensor& padding_source = batch.task(0).inputs.at(i); Tensor padding; if (padding_source.shape().dim_size(0) == 0) { return errors::InvalidArgument( "Cannot use an empty tensor with zero rows as padding when " "batching. (Input ", i, " got shape ", padding_source.shape().DebugString(), ".)"); } if (padding_source.shape().dim_size(0) == 1) { padding = padding_source; } else { padding = padding_source.Slice(0, 1); } for (int i = 0; i < padding_amount; ++i) { to_concatenate.push_back(padding); } } Tensor concatenated_tensor; Status concat_status =
Concat(context, to_concatenate, &concatenated_tensor); /* reconstructed continuation of the truncated statement, following the surrounding Concat/Split pattern */ TF_RETURN_IF_ERROR(concat_status); concatenated_tensors->push_back(concatenated_tensor); } return absl::OkStatus(); }
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h" #include <cstdint> #include <memory> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "tensorflow/core/common_runtime/cost_constants.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include "tensorflow/core/common_runtime/request_cost.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/batching_util/batch_stats.h" #include "tsl/platform/criticality.h" namespace tensorflow { namespace serving { namespace { using ::testing::Pair; using ::testing::UnorderedElementsAre; TEST(BatchTaskCriticalityTest, CriticalityDefaultsToCritical) { BatchResourceBase::BatchTask batch_task; EXPECT_EQ(batch_task.criticality(), tsl::criticality::Criticality::kCritical); } #if defined(PLATFORM_GOOGLE) TEST(BatchTaskCriticalityTest, CriticalitySuccessfullyPropagated) { std::vector<BatchResourceBase::BatchTask> batch_tasks; { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCritical); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCritical); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddablePlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddablePlus); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); batch_tasks.push_back(BatchResourceBase::BatchTask()); } batch_tasks.push_back(BatchResourceBase::BatchTask()); EXPECT_EQ(batch_tasks[0].criticality(), tsl::criticality::Criticality::kCriticalPlus); EXPECT_EQ(batch_tasks[1].criticality(), tsl::criticality::Criticality::kCritical); EXPECT_EQ(batch_tasks[2].criticality(), tsl::criticality::Criticality::kSheddablePlus); EXPECT_EQ(batch_tasks[3].criticality(), tsl::criticality::Criticality::kSheddable); EXPECT_EQ(batch_tasks[4].criticality(), tsl::criticality::Criticality::kCritical); } #endif class TestTpuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Milliseconds(100); } absl::string_view GetCostType() const override { return "test_tpu"; } }; REGISTER_COST_MEASUREMENT("test_tpu", TestTpuCostMeasurement); class TestGcuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Milliseconds(200); } absl::string_view GetCostType() const override { return "test_gcu"; } }; REGISTER_COST_MEASUREMENT("test_gcu", TestGcuCostMeasurement); std::unique_ptr<BatchResourceBase::BatchTask> MakeBatchTask( const int64_t task_size, RequestCost* request_cost) { auto task = std::make_unique<BatchResourceBase::BatchTask>(); task->inputs.push_back(Tensor(DT_DOUBLE, 
TensorShape({task_size, 1}))); task->request_cost = request_cost; return task; } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoCostMeasurement) { BatchResourceBase::BatchT batch; RequestCost cost; batch.AddTask(MakeBatchTask(1, &cost)); batch.Close(); std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty()); EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 16, 1, 15, ::testing::IsEmpty()))); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroCost) { BatchResourceBase::BatchT batch; RequestCost cost; batch.AddTask(MakeBatchTask(1, &cost)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("no_op", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty()); EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 16, 1, 15, ::testing::IsEmpty()))); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroBatchSize) { BatchResourceBase::BatchT batch; batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 0, batch); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoRequestCost) { BatchResourceBase::BatchT batch; batch.AddTask(MakeBatchTask(1, nullptr)); batch.AddTask(MakeBatchTask(9, nullptr)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_EQ(batch.task(0).request_cost, nullptr); EXPECT_EQ(batch.task(1).request_cost, nullptr); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitSingleCostType) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), 
::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitMultiCostTypes) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_gcu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)), Pair("test_gcu_with_smear", absl::Milliseconds(20)), Pair("test_gcu_no_smear", absl::Milliseconds(10)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)), Pair("test_gcu", absl::Milliseconds(200)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)), Pair("test_gcu_with_smear", absl::Milliseconds(180)), Pair("test_gcu_no_smear", absl::Milliseconds(90)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)), Pair("test_gcu", absl::Milliseconds(200)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitOnlyNonZeroCostTypes) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("no_op", context)); batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, UpdatesGlobalBatchStats) { class FakeTpuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Hours(555); } absl::string_view GetCostType() const override { return kTpuCostName; } }; CostMeasurement::Context context{ false}; 
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( std::make_unique<FakeTpuCostMeasurement>(context)); BatchResourceBase::BatchT batch; batch.AddTask(MakeBatchTask( 1, nullptr)); batch.Close(); const char kModelName[] = __FILE__; BatchResourceBase::SplitBatchCostsAndRecordMetrics( kModelName, "op_name", batch_cost_measurements, 17, batch); EXPECT_EQ(GlobalBatchStats() .model( kModelName, "op_name") .batch_size(17) .tpu_cost() .mean(), absl::Hours(555)); } } } }
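The expectations in SplitSingleCostType pin down the smear arithmetic: a cost is divided by the padded (processed) size for the no_smear share and by the sum of real task sizes for the with_smear share. A minimal sketch checking those numbers (constants lifted from that test; illustrative only):

#include <cstdio>

int main() {
  const double total_ms = 100.0;    // cost reported by test_tpu
  const double task_size = 1.0;     // first task in the batch
  const double tasks_total = 10.0;  // task sizes 1 + 9
  const double processed = 20.0;    // padded batch size
  // no_smear charges padding to no one; with_smear spreads it over real tasks.
  std::printf("test_tpu_no_smear   = %g ms\n", total_ms * task_size / processed);    // 5
  std::printf("test_tpu_with_smear = %g ms\n", total_ms * task_size / tasks_total);  // 10
  return 0;
}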
void RecordInputBatchSizeV2(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::Sampler<2>::New( {"/tensorflow/serving/batching/input_batch_size_v2", "Tracks the batch size distribution on the inputs by model_name (if " "available).", "model_name", "op_name"}, monitoring::Buckets::Exponential(2.0 / 3.0, 2, 15)); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); }
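RecordPaddingSizeV2 hand-rolls its bucket limits while RecordInputBatchSizeV2 requests Buckets::Exponential(2.0 / 3.0, 2, 15); both describe the same geometric series scale * growth^i (the padding-size variant additionally prepends -2/3 and runs one step further). A standalone sketch of the series, for illustration:

#include <cstdio>

int main() {
  double bound = 2.0 / 3.0;  // scale, as in both recorders above
  for (int i = 0; i < 15; ++i) {
    std::printf("bucket upper bound %2d: %g\n", i, bound);
    bound *= 2;  // growth factor
  }
  return 0;
}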
#include "tensorflow/core/lib/strings/ordered_code.h" #include <assert.h> #include <stddef.h> #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stringpiece.h" #include "tsl/lib/core/bits.h" namespace tensorflow { namespace strings { static const char kEscape1 = '\000'; static const char kNullCharacter = '\xff'; static const char kSeparator = '\001'; static const char kEscape2 = '\xff'; static const char kFFCharacter = '\000'; static const char kEscape1_Separator[2] = {kEscape1, kSeparator}; inline static void AppendBytes(string* dest, const char* src, size_t len) { dest->append(src, len); } inline bool IsSpecialByte(char c) { return (static_cast<unsigned char>(c + 1)) < 2; } inline const char* SkipToNextSpecialByte(const char* start, const char* limit) { DCHECK_EQ(kEscape1, 0); DCHECK_EQ(kEscape2 & 0xffu, 255u); const char* p = start; while (p < limit && !IsSpecialByte(*p)) { p++; } return p; } const char* OrderedCode::TEST_SkipToNextSpecialByte(const char* start, const char* limit) { return SkipToNextSpecialByte(start, limit); } inline static void EncodeStringFragment(string* dest, StringPiece s) { const char* p = s.data(); const char* limit = p + s.size(); const char* copy_start = p; while (true) { p = SkipToNextSpecialByte(p, limit); if (p >= limit) break; char c = *(p++); DCHECK(IsSpecialByte(c)); if (c == kEscape1) { AppendBytes(dest, copy_start, p - copy_start - 1); dest->push_back(kEscape1); dest->push_back(kNullCharacter); copy_start = p; } else { assert(c == kEscape2); AppendBytes(dest, copy_start, p - copy_start - 1); dest->push_back(kEscape2); dest->push_back(kFFCharacter); copy_start = p; } } if (p > copy_start) { AppendBytes(dest, copy_start, p - copy_start); } } void OrderedCode::WriteString(string* dest, StringPiece s) { EncodeStringFragment(dest, s); AppendBytes(dest, kEscape1_Separator, 2); } void OrderedCode::WriteNumIncreasing(string* dest, uint64 val) { unsigned char buf[9]; int len = 0; while (val > 0) { len++; buf[9 - len] = (val & 0xff); val >>= 8; } buf[9 - len - 1] = len; len++; AppendBytes(dest, reinterpret_cast<const char*>(buf + 9 - len), len); } inline static bool ReadStringInternal(StringPiece* src, string* result) { const char* start = src->data(); const char* string_limit = src->data() + src->size(); const char* limit = string_limit - 1; const char* copy_start = start; while (true) { start = SkipToNextSpecialByte(start, limit); if (start >= limit) break; const char c = *(start++); DCHECK(IsSpecialByte(c)); if (c == kEscape1) { if (result) { AppendBytes(result, copy_start, start - copy_start - 1); } const char next = *(start++); if (next == kSeparator) { src->remove_prefix(start - src->data()); return true; } else if (next == kNullCharacter) { if (result) { *result += '\0'; } } else { return false; } copy_start = start; } else { assert(c == kEscape2); if (result) { AppendBytes(result, copy_start, start - copy_start - 1); } const char next = *(start++); if (next == kFFCharacter) { if (result) { *result += '\xff'; } } else { return false; } copy_start = start; } } return false; } bool OrderedCode::ReadString(StringPiece* src, string* result) { return ReadStringInternal(src, result); } bool OrderedCode::ReadNumIncreasing(StringPiece* src, uint64* result) { if (src->empty()) { return false; } const size_t len = static_cast<unsigned char>((*src)[0]); DCHECK(0 == len || src->size() == 1 || (*src)[1] != '\0') << "invalid encoding"; if (len + 1 > src->size() || len > 8) { return false; } if (result) { uint64 tmp = 0; for (size_t i = 0; 
i < len; i++) { tmp <<= 8; tmp |= static_cast<unsigned char>((*src)[1 + i]); } *result = tmp; } src->remove_prefix(len + 1); return true; } void OrderedCode::TEST_Corrupt(string* str, int k) { int seen_seps = 0; for (size_t i = 0; i + 1 < str->size(); i++) { if ((*str)[i] == kEscape1 && (*str)[i + 1] == kSeparator) { seen_seps++; if (seen_seps == k) { (*str)[i + 1] = kSeparator + 1; return; } } } } static const int kMaxSigned64Length = 10; static const char kLengthToHeaderBits[1 + kMaxSigned64Length][2] = { {0, 0}, {'\x80', 0}, {'\xc0', 0}, {'\xe0', 0}, {'\xf0', 0}, {'\xf8', 0}, {'\xfc', 0}, {'\xfe', 0}, {'\xff', 0}, {'\xff', '\x80'}, {'\xff', '\xc0'}}; static const uint64 kLengthToMask[1 + kMaxSigned64Length] = { 0ULL, 0x80ULL, 0xc000ULL, 0xe00000ULL, 0xf0000000ULL, 0xf800000000ULL, 0xfc0000000000ULL, 0xfe000000000000ULL, 0xff00000000000000ULL, 0x8000000000000000ULL, 0ULL}; static const int8 kBitsToLength[1 + 63] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10}; static inline int SignedEncodingLength(int64_t n) { return kBitsToLength[tsl::Log2Floor64(n < 0 ? ~n : n) + 1]; } static void StoreBigEndian64(char* dst, uint64 v) { for (int i = 0; i < 8; i++) { dst[i] = (v >> (56 - 8 * i)) & 0xff; } } static uint64 LoadBigEndian64(const char* src) { uint64 result = 0; for (int i = 0; i < 8; i++) { unsigned char c = static_cast<unsigned char>(src[i]); result |= static_cast<uint64>(c) << (56 - 8 * i); } return result; } void OrderedCode::WriteSignedNumIncreasing(string* dest, int64_t val) { const uint64 x = val < 0 ? ~val : val; if (x < 64) { *dest += kLengthToHeaderBits[1][0] ^ val; return; } const char sign_byte = val < 0 ? '\xff' : '\0'; char buf[10] = { sign_byte, sign_byte, }; StoreBigEndian64(buf + 2, val); static_assert(sizeof(buf) == kMaxSigned64Length, "max length size mismatch"); const int len = SignedEncodingLength(x); DCHECK_GE(len, 2); char* const begin = buf + sizeof(buf) - len; begin[0] ^= kLengthToHeaderBits[len][0]; begin[1] ^= kLengthToHeaderBits[len][1]; dest->append(begin, len); } bool OrderedCode::ReadSignedNumIncreasing(StringPiece* src, int64_t* result) { if (src->empty()) return false; const uint64 xor_mask = (!((*src)[0] & 0x80)) ? ~0ULL : 0ULL; const unsigned char first_byte = (*src)[0] ^ (xor_mask & 0xff); int len; uint64 x; if (first_byte != 0xff) { len = 7 - tsl::Log2Floor64(first_byte ^ 0xff); if (src->size() < static_cast<size_t>(len)) return false; x = xor_mask; for (int i = 0; i < len; ++i) x = (x << 8) | static_cast<unsigned char>((*src)[i]); } else { len = 8; if (src->size() < static_cast<size_t>(len)) return false; const unsigned char second_byte = (*src)[1] ^ (xor_mask & 0xff); if (second_byte >= 0x80) { if (second_byte < 0xc0) { len = 9; } else { const unsigned char third_byte = (*src)[2] ^ (xor_mask & 0xff); if (second_byte == 0xc0 && third_byte < 0x80) { len = 10; } else { return false; } } if (src->size() < static_cast<size_t>(len)) return false; } x = LoadBigEndian64(src->data() + len - 8); } x ^= kLengthToMask[len]; DCHECK_EQ(len, SignedEncodingLength(x)) << "invalid encoding"; if (result) *result = x; src->remove_prefix(len); return true; } } }
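Stripped of the SkipToNextSpecialByte fast path, WriteString's byte-level contract is small: NUL becomes {kEscape1, kNullCharacter}, 0xff becomes {kEscape2, kFFCharacter}, and every string ends with {kEscape1, kSeparator} so that encodings sort like the originals. A minimal restatement (the function name EscapeForOrderedCode is hypothetical):

#include <string>

std::string EscapeForOrderedCode(const std::string& s) {
  std::string out;
  for (char c : s) {
    if (c == '\0') {
      out += '\0'; out += '\xff';   // kEscape1 + kNullCharacter
    } else if (c == '\xff') {
      out += '\xff'; out += '\0';   // kEscape2 + kFFCharacter
    } else {
      out += c;
    }
  }
  out += '\0'; out += '\x01';       // terminator: kEscape1 + kSeparator
  return out;
}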
#include "tensorflow/core/lib/strings/ordered_code.h" #include <float.h> #include <stddef.h> #include <limits> #include <vector> #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace strings { namespace { string RandomString(random::SimplePhilox* rnd, size_t len) { string x; for (size_t i = 0; i < len; i++) { x += rnd->Uniform(256); } return x; } template <typename T> void OCWriteIncreasing(string* dest, const T& val); template <typename T> bool OCReadIncreasing(StringPiece* src, T* result); template <> void OCWriteIncreasing<string>(string* dest, const string& val) { OrderedCode::WriteString(dest, val); } template <> bool OCReadIncreasing<string>(StringPiece* src, string* result) { return OrderedCode::ReadString(src, result); } template <> void OCWriteIncreasing<uint64>(string* dest, const uint64& val) { OrderedCode::WriteNumIncreasing(dest, val); } template <> bool OCReadIncreasing<uint64>(StringPiece* src, uint64* result) { return OrderedCode::ReadNumIncreasing(src, result); } template <> void OCWriteIncreasing<int64_t>(string* dest, const int64_t& val) { OrderedCode::WriteSignedNumIncreasing(dest, val); } template <> bool OCReadIncreasing<int64_t>(StringPiece* src, int64_t* result) { return OrderedCode::ReadSignedNumIncreasing(src, result); } template <typename T> string OCWrite(T val) { string result; OCWriteIncreasing<T>(&result, val); return result; } template <typename T> void OCWriteToString(string* result, T val) { OCWriteIncreasing<T>(result, val); } template <typename T> bool OCRead(StringPiece* s, T* val) { return OCReadIncreasing<T>(s, val); } template <typename T> T TestRead(const string& a) { for (int i = 0; i < a.size() - 1; ++i) { StringPiece s(a.data(), i); CHECK(!OCRead<T>(&s, nullptr)); CHECK_EQ(s, a.substr(0, i)); } StringPiece s(a); T v; CHECK(OCRead<T>(&s, &v)); CHECK(s.empty()); return v; } template <typename T> void TestWriteRead(T expected) { EXPECT_EQ(expected, TestRead<T>(OCWrite<T>(expected))); } template <typename T, typename U> void TestWriteAppends(T first, U second) { string encoded; OCWriteToString<T>(&encoded, first); string encoded_first_only = encoded; OCWriteToString<U>(&encoded, second); EXPECT_NE(encoded, encoded_first_only); EXPECT_TRUE(absl::StartsWith(encoded, encoded_first_only)); } template <typename T> void TestNumbers(T multiplier) { for (T x = std::numeric_limits<T>().max(); x != 0; x /= 2) { TestWriteRead(multiplier * (x - 1)); TestWriteRead(multiplier * x); if (x != std::numeric_limits<T>::max()) { TestWriteRead(multiplier * (x + 1)); } else if (multiplier < 0 && multiplier == -1) { TestWriteRead(-x - 1); } } random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int bits = 1; bits <= std::numeric_limits<T>().digits; ++bits) { const uint64 mask = (~0ULL) >> (64 - bits); for (int i = 0; i < 1000; i++) { T x = rnd.Rand64() & mask; TestWriteRead(multiplier * x); T y = rnd.Rand64() & mask; TestWriteAppends(multiplier * x, multiplier * y); } } } bool CompareStrings(const string& a, const string& b) { return (a < b); } template <typename T> void TestNumberOrdering() { string laststr = OCWrite<T>(std::numeric_limits<T>().min()); for (T num = std::numeric_limits<T>().min() / 2; num != 0; num /= 2) { string 
strminus1 = OCWrite<T>(num - 1); string str = OCWrite<T>(num); string strplus1 = OCWrite<T>(num + 1); CHECK(CompareStrings(strminus1, str)); CHECK(CompareStrings(str, strplus1)); CHECK(CompareStrings(laststr, str)); laststr = str; } laststr = OCWrite<T>(0); T num = 1; while (num < std::numeric_limits<T>().max() / 2) { num *= 2; string strminus1 = OCWrite<T>(num - 1); string str = OCWrite<T>(num); string strplus1 = OCWrite<T>(num + 1); CHECK(CompareStrings(strminus1, str)); CHECK(CompareStrings(str, strplus1)); CHECK(CompareStrings(laststr, str)); laststr = str; } } size_t FindSpecial(const string& x) { const char* p = x.data(); const char* limit = p + x.size(); const char* result = OrderedCode::TEST_SkipToNextSpecialByte(p, limit); return result - p; } template <size_t N> string ByteSequence(const char (&arr)[N]) { return string(arr, N - 1); } TEST(OrderedCode, SkipToNextSpecialByte) { for (size_t len = 0; len < 256; len++) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); string x; while (x.size() < len) { char c = 1 + rnd.Uniform(254); ASSERT_NE(c, 0); ASSERT_NE(c, 255); x += c; } EXPECT_EQ(FindSpecial(x), x.size()); for (size_t special_pos = 0; special_pos < len; special_pos++) { for (size_t special_test = 0; special_test < 2; special_test++) { const char special_byte = (special_test == 0) ? 0 : 255; string y = x; y[special_pos] = special_byte; EXPECT_EQ(FindSpecial(y), special_pos); if (special_pos < 16) { for (size_t rest = special_pos + 1; rest < len; rest++) { if (rnd.OneIn(3)) { y[rest] = rnd.OneIn(2) ? 0 : 255; EXPECT_EQ(FindSpecial(y), special_pos); } } } } } } } TEST(OrderedCode, ExhaustiveFindSpecial) { char buf[16]; char* limit = buf + sizeof(buf); int count = 0; for (int start_offset = 0; start_offset <= 5; start_offset += 5) { for (size_t i = 0; i < sizeof(buf); i++) { buf[i] = 'a'; } for (int b0 = 0; b0 < 256; b0++) { for (int b1 = 0; b1 < 256; b1++) { for (int b2 = 0; b2 < 256; b2++) { buf[start_offset + 0] = b0; buf[start_offset + 1] = b1; buf[start_offset + 2] = b2; char* expected; if (b0 == 0 || b0 == 255) { expected = &buf[start_offset]; } else if (b1 == 0 || b1 == 255) { expected = &buf[start_offset + 1]; } else if (b2 == 0 || b2 == 255) { expected = &buf[start_offset + 2]; } else { expected = limit; } count++; EXPECT_EQ(expected, OrderedCode::TEST_SkipToNextSpecialByte(buf, limit)); } } } } EXPECT_EQ(count, 256 * 256 * 256 * 2); } TEST(Uint64, EncodeDecode) { TestNumbers<uint64>(1); } TEST(Uint64, Ordering) { TestNumberOrdering<uint64>(); } TEST(Int64, EncodeDecode) { TestNumbers<int64_t>(1); TestNumbers<int64_t>(-1); } TEST(Int64, Ordering) { TestNumberOrdering<int64_t>(); } inline string StrNot(const string& s) { string result; for (string::const_iterator it = s.begin(); it != s.end(); ++it) result.push_back(~*it); return result; } template <typename T> void TestInvalidEncoding(const string& s) { StringPiece p(s); EXPECT_FALSE(OCRead<T>(&p, nullptr)); EXPECT_EQ(s, p); } TEST(OrderedCodeInvalidEncodingsTest, Overflow) { const string k2xx64U = "\x09\x01" + string(8, 0); TestInvalidEncoding<uint64>(k2xx64U); const string k2xx63 = "\xff\xc0\x80" + string(7, 0); TestInvalidEncoding<int64_t>(k2xx63); TestInvalidEncoding<int64_t>(StrNot(k2xx63)); } TEST(OrderedCodeInvalidEncodingsDeathTest, NonCanonical) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int n = 2; n <= 9; ++n) { string non_minimal = string(1, n - 1) + string(1, 0) + RandomString(&rnd, n - 2); EXPECT_EQ(n, non_minimal.length()); 
EXPECT_NE(OCWrite<uint64>(0), non_minimal); #ifndef NDEBUG StringPiece s(non_minimal); EXPECT_DEATH(OrderedCode::ReadNumIncreasing(&s, nullptr), "invalid encoding"); #else TestRead<uint64>(non_minimal); #endif } for (int n = 2; n <= 10; ++n) { string header = string(n / 8, 0xff) + string(1, 0xff << (8 - (n % 8))); string non_minimal = header + string(1, rnd.Uniform(256) & ~*header.rbegin()) + RandomString(&rnd, n - header.length() - 1); EXPECT_EQ(n, non_minimal.length()); EXPECT_NE(OCWrite<int64_t>(0), non_minimal); #ifndef NDEBUG StringPiece s(non_minimal); EXPECT_DEATH(OrderedCode::ReadSignedNumIncreasing(&s, nullptr), "invalid encoding") << n; #else TestRead<int64_t>(non_minimal); #endif } } uint64 NextBits(random::SimplePhilox* rnd, int bits) { return (bits != 0) ? (rnd->Rand64() % (1LL << (bits - 1))) + (1LL << (bits - 1)) : 0; } template <typename T> void BM_WriteNum(::testing::benchmark::State& state, T multiplier) { constexpr int kValues = 64; T values[kValues]; random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int i = 0; i < kValues; i++) { values[i] = NextBits(&rnd, state.max_iterations % 64) * multiplier; } string result; int index = 0; for (auto i : state) { result.clear(); OCWriteToString<T>(&result, values[index % kValues]); index++; } } template <typename T> void BM_ReadNum(::testing::benchmark::State& state, T multiplier) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); constexpr int kValues = 64; string values[kValues]; for (int i = 0; i < kValues; i++) { T val = NextBits(&rnd, i % 64) * multiplier; values[i] = OCWrite<T>(val); } uint32 index = 0; for (auto i : state) { T val; StringPiece s = values[index++ % kValues]; OCRead<T>(&s, &val); } } #define BENCHMARK_NUM(name, T, multiplier) \ void BM_Write##name(::testing::benchmark::State& state) { \ BM_WriteNum<T>(state, multiplier); \ } \ BENCHMARK(BM_Write##name); \ void BM_Read##name(::testing::benchmark::State& state) { \ BM_ReadNum<T>(state, multiplier); \ } \ BENCHMARK(BM_Read##name) BENCHMARK_NUM(NumIncreasing, uint64, 1); BENCHMARK_NUM(SignedNum, int64_t, 1); BENCHMARK_NUM(SignedNumNegative, int64_t, -1); #undef BENCHMARK_NUM TEST(String, EncodeDecode) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int len = 0; len < 256; len++) { const string a = RandomString(&rnd, len); TestWriteRead(a); for (int len2 = 0; len2 < 64; len2++) { const string b = RandomString(&rnd, len2); TestWriteAppends(a, b); string out; OCWriteToString<string>(&out, a); OCWriteToString<string>(&out, b); string a2, b2, dummy; StringPiece s = out; StringPiece s2 = out; CHECK(OCRead<string>(&s, &a2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(OCRead<string>(&s, &b2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(!OCRead<string>(&s, &dummy)); CHECK(!OCRead<string>(&s2, nullptr)); CHECK_EQ(a, a2); CHECK_EQ(b, b2); CHECK(s.empty()); CHECK(s2.empty()); } } } #define STATIC_STR(str) StringPiece((str), sizeof(str) - 1) string EncodeStringIncreasing(StringPiece value) { string encoded; OrderedCode::WriteString(&encoded, value); return encoded; } TEST(String, Increasing) { ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("")), EncodeStringIncreasing(STATIC_STR(""))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("")), EncodeStringIncreasing(STATIC_STR("\x00"))); ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("\x00")), EncodeStringIncreasing(STATIC_STR("\x00"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x00")), 
EncodeStringIncreasing(STATIC_STR("\x01"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x01")), EncodeStringIncreasing(STATIC_STR("a"))); ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("a")), EncodeStringIncreasing(STATIC_STR("a"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("a")), EncodeStringIncreasing(STATIC_STR("aa"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("aa")), EncodeStringIncreasing(STATIC_STR("\xff"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff")), EncodeStringIncreasing(STATIC_STR("\xff\x00"))); ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff\x00")), EncodeStringIncreasing(STATIC_STR("\xff\x01"))); } TEST(EncodingIsExpected, String) { std::vector<std::pair<string, string>> data = { {"", string("\x00\x01", 2)}, {"foo", string("foo\x00\x01", 5)}, {"hello", string("hello\x00\x01", 7)}, {string("\x00\x01\xff", 3), string("\x00\xff\x01\xff\x00\x00\x01", 7)}, }; for (const auto& t : data) { string result; OrderedCode::WriteString(&result, t.first); EXPECT_EQ(t.second, result); StringPiece in = result; string decoded; EXPECT_TRUE(OrderedCode::ReadString(&in, &decoded)); EXPECT_EQ(t.first, decoded); EXPECT_EQ("", in); } } TEST(EncodingIsExpected, Unsigned) { std::vector<std::pair<uint64, string>> data = { {0x0ull, ByteSequence("\000")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x1ull, ByteSequence("\001\001")}, {0x2ull, ByteSequence("\001\002")}, {0x3ull, ByteSequence("\001\003")}, {0x3ull, ByteSequence("\001\003")}, {0x4ull, ByteSequence("\001\004")}, {0x5ull, ByteSequence("\001\005")}, {0x7ull, ByteSequence("\001\007")}, {0x8ull, ByteSequence("\001\010")}, {0x9ull, ByteSequence("\001\t")}, {0xfull, ByteSequence("\001\017")}, {0x10ull, ByteSequence("\001\020")}, {0x11ull, ByteSequence("\001\021")}, {0x1full, ByteSequence("\001\037")}, {0x20ull, ByteSequence("\001 ")}, {0x21ull, ByteSequence("\001!")}, {0x3full, ByteSequence("\001?")}, {0x40ull, ByteSequence("\001@")}, {0x41ull, ByteSequence("\001A")}, {0x7full, ByteSequence("\001\177")}, {0x80ull, ByteSequence("\001\200")}, {0x81ull, ByteSequence("\001\201")}, {0xffull, ByteSequence("\001\377")}, {0x100ull, ByteSequence("\002\001\000")}, {0x101ull, ByteSequence("\002\001\001")}, {0x1ffull, ByteSequence("\002\001\377")}, {0x200ull, ByteSequence("\002\002\000")}, {0x201ull, ByteSequence("\002\002\001")}, {0x3ffull, ByteSequence("\002\003\377")}, {0x400ull, ByteSequence("\002\004\000")}, {0x401ull, ByteSequence("\002\004\001")}, {0x7ffull, ByteSequence("\002\007\377")}, {0x800ull, ByteSequence("\002\010\000")}, {0x801ull, ByteSequence("\002\010\001")}, {0xfffull, ByteSequence("\002\017\377")}, {0x1000ull, ByteSequence("\002\020\000")}, {0x1001ull, ByteSequence("\002\020\001")}, {0x1fffull, ByteSequence("\002\037\377")}, {0x2000ull, ByteSequence("\002 \000")}, {0x2001ull, ByteSequence("\002 \001")}, {0x3fffull, ByteSequence("\002?\377")}, {0x4000ull, ByteSequence("\002@\000")}, {0x4001ull, ByteSequence("\002@\001")}, {0x7fffull, ByteSequence("\002\177\377")}, {0x8000ull, ByteSequence("\002\200\000")}, {0x8001ull, ByteSequence("\002\200\001")}, {0xffffull, ByteSequence("\002\377\377")}, {0x10000ull, ByteSequence("\003\001\000\000")}, {0x10001ull, ByteSequence("\003\001\000\001")}, {0x1ffffull, ByteSequence("\003\001\377\377")}, {0x20000ull, ByteSequence("\003\002\000\000")}, {0x20001ull, ByteSequence("\003\002\000\001")}, {0x3ffffull, ByteSequence("\003\003\377\377")}, {0x40000ull, ByteSequence("\003\004\000\000")}, {0x40001ull, ByteSequence("\003\004\000\001")}, {0x7ffffull, 
ByteSequence("\003\007\377\377")}, {0x80000ull, ByteSequence("\003\010\000\000")}, {0x80001ull, ByteSequence("\003\010\000\001")}, {0xfffffull, ByteSequence("\003\017\377\377")}, {0x100000ull, ByteSequence("\003\020\000\000")}, {0x100001ull, ByteSequence("\003\020\000\001")}, {0x1fffffull, ByteSequence("\003\037\377\377")}, {0x200000ull, ByteSequence("\003 \000\000")}, {0x200001ull, ByteSequence("\003 \000\001")}, {0x3fffffull, ByteSequence("\003?\377\377")}, {0x400000ull, ByteSequence("\003@\000\000")}, {0x400001ull, ByteSequence("\003@\000\001")}, {0x7fffffull, ByteSequence("\003\177\377\377")}, {0x800000ull, ByteSequence("\003\200\000\000")}, {0x800001ull, ByteSequence("\003\200\000\001")}, {0xffffffull, ByteSequence("\003\377\377\377")}, {0x1000000ull, ByteSequence("\004\001\000\000\000")}, {0x1000001ull, ByteSequence("\004\001\000\000\001")}, {0x1ffffffull, ByteSequence("\004\001\377\377\377")}, {0x2000000ull, ByteSequence("\004\002\000\000\000")}, {0x2000001ull, ByteSequence("\004\002\000\000\001")}, {0x3ffffffull, ByteSequence("\004\003\377\377\377")}, {0x4000000ull, ByteSequence("\004\004\000\000\000")}, {0x4000001ull, ByteSequence("\004\004\000\000\001")}, {0x7ffffffull, ByteSequence("\004\007\377\377\377")}, {0x8000000ull, ByteSequence("\004\010\000\000\000")}, {0x8000001ull, ByteSequence("\004\010\000\000\001")}, {0xfffffffull, ByteSequence("\004\017\377\377\377")}, {0x10000000ull, ByteSequence("\004\020\000\000\000")}, {0x10000001ull, ByteSequence("\004\020\000\000\001")}, {0x1fffffffull, ByteSequence("\004\037\377\377\377")}, {0x20000000ull, ByteSequence("\004 \000\000\000")}, {0x20000001ull, ByteSequence("\004 \000\000\001")}, {0x3fffffffull, ByteSequence("\004?\377\377\377")}, {0x40000000ull, ByteSequence("\004@\000\000\000")}, {0x40000001ull, ByteSequence("\004@\000\000\001")}, {0x7fffffffull, ByteSequence("\004\177\377\377\377")}, {0x80000000ull, ByteSequence("\004\200\000\000\000")}, {0x80000001ull, ByteSequence("\004\200\000\000\001")}, {0xffffffffull, ByteSequence("\004\377\377\377\377")}, {0x100000000ull, ByteSequence("\005\001\000\000\000\000")}, {0x100000001ull, ByteSequence("\005\001\000\000\000\001")}, {0x1ffffffffull, ByteSequence("\005\001\377\377\377\377")}, {0x200000000ull, ByteSequence("\005\002\000\000\000\000")}, {0x200000001ull, ByteSequence("\005\002\000\000\000\001")}, {0x3ffffffffull, ByteSequence("\005\003\377\377\377\377")}, {0x400000000ull, ByteSequence("\005\004\000\000\000\000")}, {0x400000001ull, ByteSequence("\005\004\000\000\000\001")}, {0x7ffffffffull, ByteSequence("\005\007\377\377\377\377")}, {0x800000000ull, ByteSequence("\005\010\000\000\000\000")}, {0x800000001ull, ByteSequence("\005\010\000\000\000\001")}, {0xfffffffffull, ByteSequence("\005\017\377\377\377\377")}, {0x1000000000ull, ByteSequence("\005\020\000\000\000\000")}, {0x1000000001ull, ByteSequence("\005\020\000\000\000\001")}, {0x1fffffffffull, ByteSequence("\005\037\377\377\377\377")}, {0x2000000000ull, ByteSequence("\005 \000\000\000\000")}, {0x2000000001ull, ByteSequence("\005 \000\000\000\001")}, {0x3fffffffffull, ByteSequence("\005?\377\377\377\377")}, {0x4000000000ull, ByteSequence("\005@\000\000\000\000")}, {0x4000000001ull, ByteSequence("\005@\000\000\000\001")}, {0x7fffffffffull, ByteSequence("\005\177\377\377\377\377")}, {0x8000000000ull, ByteSequence("\005\200\000\000\000\000")}, {0x8000000001ull, ByteSequence("\005\200\000\000\000\001")}, {0xffffffffffull, ByteSequence("\005\377\377\377\377\377")}, {0x10000000000ull, ByteSequence("\006\001\000\000\000\000\000")}, 
{0x10000000001ull, ByteSequence("\006\001\000\000\000\000\001")}, {0x1ffffffffffull, ByteSequence("\006\001\377\377\377\377\377")}, {0x20000000000ull, ByteSequence("\006\002\000\000\000\000\000")}, {0x20000000001ull, ByteSequence("\006\002\000\000\000\000\001")}, {0x3ffffffffffull, ByteSequence("\006\003\377\377\377\377\377")}, {0x40000000000ull, ByteSequence("\006\004\000\000\000\000\000")}, {0x40000000001ull, ByteSequence("\006\004\000\000\000\000\001")}, {0x7ffffffffffull, ByteSequence("\006\007\377\377\377\377\377")}, {0x80000000000ull, ByteSequence("\006\010\000\000\000\000\000")}, {0x80000000001ull, ByteSequence("\006\010\000\000\000\000\001")}, {0xfffffffffffull, ByteSequence("\006\017\377\377\377\377\377")}, {0x100000000000ull, ByteSequence("\006\020\000\000\000\000\000")}, {0x100000000001ull, ByteSequence("\006\020\000\000\000\000\001")}, {0x1fffffffffffull, ByteSequence("\006\037\377\377\377\377\377")}, {0x200000000000ull, ByteSequence("\006 \000\000\000\000\000")}, {0x200000000001ull, ByteSequence("\006 \000\000\000\000\001")}, {0x3fffffffffffull, ByteSequence("\006?\377\377\377\377\377")}, {0x400000000000ull, ByteSequence("\006@\000\000\000\000\000")}, {0x400000000001ull, ByteSequence("\006@\000\000\000\000\001")}, {0x7fffffffffffull, ByteSequence("\006\177\377\377\377\377\377")}, {0x800000000000ull, ByteSequence("\006\200\000\000\000\000\000")}, {0x800000000001ull, ByteSequence("\006\200\000\000\000\000\001")}, {0xffffffffffffull, ByteSequence("\006\377\377\377\377\377\377")}, {0x1000000000000ull, ByteSequence("\007\001\000\000\000\000\000\000")}, {0x1000000000001ull, ByteSequence("\007\001\000\000\000\000\000\001")}, {0x1ffffffffffffull, ByteSequence("\007\001\377\377\377\377\377\377")}, {0x2000000000000ull, ByteSequence("\007\002\000\000\000\000\000\000")}, {0x2000000000001ull, ByteSequence("\007\002\000\000\000\000\000\001")}, {0x3ffffffffffffull, ByteSequence("\007\003\377\377\377\377\377\377")}, {0x4000000000000ull, ByteSequence("\007\004\000\000\000\000\000\000")}, {0x4000000000001ull, ByteSequence("\007\004\000\000\000\000\000\001")}, {0x7ffffffffffffull, ByteSequence("\007\007\377\377\377\377\377\377")}, {0x8000000000000ull, ByteSequence("\007\010\000\000\000\000\000\000")}, {0x8000000000001ull, ByteSequence("\007\010\000\000\000\000\000\001")}, {0xfffffffffffffull, ByteSequence("\007\017\377\377\377\377\377\377")}, {0x10000000000000ull, ByteSequence("\007\020\000\000\000\000\000\000")}, {0x10000000000001ull, ByteSequence("\007\020\000\000\000\000\000\001")}, {0x1fffffffffffffull, ByteSequence("\007\037\377\377\377\377\377\377")}, {0x20000000000000ull, ByteSequence("\007 \000\000\000\000\000\000")}, {0x20000000000001ull, ByteSequence("\007 \000\000\000\000\000\001")}, {0x3fffffffffffffull, ByteSequence("\007?\377\377\377\377\377\377")}, {0x40000000000000ull, ByteSequence("\007@\000\000\000\000\000\000")}, {0x40000000000001ull, ByteSequence("\007@\000\000\000\000\000\001")}, {0x7fffffffffffffull, ByteSequence("\007\177\377\377\377\377\377\377")}, {0x80000000000000ull, ByteSequence("\007\200\000\000\000\000\000\000")}, {0x80000000000001ull, ByteSequence("\007\200\000\000\000\000\000\001")}, {0xffffffffffffffull, ByteSequence("\007\377\377\377\377\377\377\377")}, {0x100000000000000ull, ByteSequence("\010\001\000\000\000\000\000\000\000")}, {0x100000000000001ull, ByteSequence("\010\001\000\000\000\000\000\000\001")}, {0x1ffffffffffffffull, ByteSequence("\010\001\377\377\377\377\377\377\377")}, {0x200000000000000ull, ByteSequence("\010\002\000\000\000\000\000\000\000")}, 
{0x200000000000001ull, ByteSequence("\010\002\000\000\000\000\000\000\001")}, {0x3ffffffffffffffull, ByteSequence("\010\003\377\377\377\377\377\377\377")}, {0x400000000000000ull, ByteSequence("\010\004\000\000\000\000\000\000\000")}, {0x400000000000001ull, ByteSequence("\010\004\000\000\000\000\000\000\001")}, {0x7ffffffffffffffull, ByteSequence("\010\007\377\377\377\377\377\377\377")}, {0x800000000000000ull, ByteSequence("\010\010\000\000\000\000\000\000\000")}, {0x800000000000001ull, ByteSequence("\010\010\000\000\000\000\000\000\001")}, {0xfffffffffffffffull, ByteSequence("\010\017\377\377\377\377\377\377\377")}, {0x1000000000000000ull, ByteSequence("\010\020\000\000\000\000\000\000\000")}, {0x1000000000000001ull, ByteSequence("\010\020\000\000\000\000\000\000\001")}, {0x1fffffffffffffffull, ByteSequence("\010\037\377\377\377\377\377\377\377")}, {0x2000000000000000ull, ByteSequence("\010 \000\000\000\000\000\000\000")}, {0x2000000000000001ull, ByteSequence("\010 \000\000\000\000\000\000\001")}, {0x3fffffffffffffffull, ByteSequence("\010?\377\377\377\377\377\377\377")}, {0x4000000000000000ull, ByteSequence("\010@\000\000\000\000\000\000\000")}, {0x4000000000000001ull, ByteSequence("\010@\000\000\000\000\000\000\001")}, {0x7fffffffffffffffull, ByteSequence("\010\177\377\377\377\377\377\377\377")}, {0x8000000000000000ull, ByteSequence("\010\200\000\000\000\000\000\000\000")}, {0x8000000000000001ull, ByteSequence("\010\200\000\000\000\000\000\000\001")}, }; for (const auto& t : data) { uint64 num = t.first; string result; OrderedCode::WriteNumIncreasing(&result, num); EXPECT_EQ(t.second, result) << std::hex << num; StringPiece in = result; uint64 decoded; EXPECT_TRUE(OrderedCode::ReadNumIncreasing(&in, &decoded)); EXPECT_EQ(num, decoded); EXPECT_EQ("", in); } } TEST(EncodingIsExpected, Signed) { std::vector<std::pair<int64_t, string>> data = { {0ll, ByteSequence("\200")}, {1ll, ByteSequence("\201")}, {2ll, ByteSequence("\202")}, {1ll, ByteSequence("\201")}, {2ll, ByteSequence("\202")}, {3ll, ByteSequence("\203")}, {3ll, ByteSequence("\203")}, {4ll, ByteSequence("\204")}, {5ll, ByteSequence("\205")}, {7ll, ByteSequence("\207")}, {8ll, ByteSequence("\210")}, {9ll, ByteSequence("\211")}, {15ll, ByteSequence("\217")}, {16ll, ByteSequence("\220")}, {17ll, ByteSequence("\221")}, {31ll, ByteSequence("\237")}, {32ll, ByteSequence("\240")}, {33ll, ByteSequence("\241")}, {63ll, ByteSequence("\277")}, {64ll, ByteSequence("\300@")}, {65ll, ByteSequence("\300A")}, {127ll, ByteSequence("\300\177")}, {128ll, ByteSequence("\300\200")}, {129ll, ByteSequence("\300\201")}, {255ll, ByteSequence("\300\377")}, {256ll, ByteSequence("\301\000")}, {257ll, ByteSequence("\301\001")}, {511ll, ByteSequence("\301\377")}, {512ll, ByteSequence("\302\000")}, {513ll, ByteSequence("\302\001")}, {1023ll, ByteSequence("\303\377")}, {1024ll, ByteSequence("\304\000")}, {1025ll, ByteSequence("\304\001")}, {2047ll, ByteSequence("\307\377")}, {2048ll, ByteSequence("\310\000")}, {2049ll, ByteSequence("\310\001")}, {4095ll, ByteSequence("\317\377")}, {4096ll, ByteSequence("\320\000")}, {4097ll, ByteSequence("\320\001")}, {8191ll, ByteSequence("\337\377")}, {8192ll, ByteSequence("\340 \000")}, {8193ll, ByteSequence("\340 \001")}, {16383ll, ByteSequence("\340?\377")}, {16384ll, ByteSequence("\340@\000")}, {16385ll, ByteSequence("\340@\001")}, {32767ll, ByteSequence("\340\177\377")}, {32768ll, ByteSequence("\340\200\000")}, {32769ll, ByteSequence("\340\200\001")}, {65535ll, ByteSequence("\340\377\377")}, {65536ll, 
ByteSequence("\341\000\000")}, {65537ll, ByteSequence("\341\000\001")}, {131071ll, ByteSequence("\341\377\377")}, {131072ll, ByteSequence("\342\000\000")}, {131073ll, ByteSequence("\342\000\001")}, {262143ll, ByteSequence("\343\377\377")}, {262144ll, ByteSequence("\344\000\000")}, {262145ll, ByteSequence("\344\000\001")}, {524287ll, ByteSequence("\347\377\377")}, {524288ll, ByteSequence("\350\000\000")}, {524289ll, ByteSequence("\350\000\001")}, {1048575ll, ByteSequence("\357\377\377")}, {1048576ll, ByteSequence("\360\020\000\000")}, {1048577ll, ByteSequence("\360\020\000\001")}, {2097151ll, ByteSequence("\360\037\377\377")}, {2097152ll, ByteSequence("\360 \000\000")}, {2097153ll, ByteSequence("\360 \000\001")}, {4194303ll, ByteSequence("\360?\377\377")}, {4194304ll, ByteSequence("\360@\000\000")}, {4194305ll, ByteSequence("\360@\000\001")}, {8388607ll, ByteSequence("\360\177\377\377")}, {8388608ll, ByteSequence("\360\200\000\000")}, {8388609ll, ByteSequence("\360\200\000\001")}, {16777215ll, ByteSequence("\360\377\377\377")}, {16777216ll, ByteSequence("\361\000\000\000")}, {16777217ll, ByteSequence("\361\000\000\001")}, {33554431ll, ByteSequence
bool OrderedCode::ReadString(StringPiece* src, string* result) {
  return ReadStringInternal(src, result);
}
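A minimal standalone sketch of the byte-escaping scheme that ReadStringInternal reverses, inferred from the EncodingIsExpected.String table above (0x00 becomes 0x00 0xFF, 0xFF becomes 0xFF 0x00, and the terminator is 0x00 0x01). The function name here is illustrative, not part of the library.

#include <cassert>
#include <string>

// Hypothetical re-implementation of the escaping used by
// OrderedCode::WriteString, derived from the expected-encoding table:
//   0x00 -> 0x00 0xFF, 0xFF -> 0xFF 0x00, terminator 0x00 0x01.
std::string EscapeForOrdering(const std::string& in) {
  std::string out;
  for (unsigned char c : in) {
    if (c == 0x00) {
      out += '\x00';
      out += '\xff';
    } else if (c == 0xff) {
      out += '\xff';
      out += '\x00';
    } else {
      out += static_cast<char>(c);
    }
  }
  out += '\x00';  // terminator, byte 1 of 2
  out += '\x01';  // terminator, byte 2 of 2
  return out;
}

int main() {
  // Matches the table: "foo" encodes as "foo\x00\x01" (5 bytes).
  assert(EscapeForOrdering("foo") == std::string("foo\x00\x01", 5));
  assert(EscapeForOrdering(std::string("\x00\x01\xff", 3)) ==
         std::string("\x00\xff\x01\xff\x00\x00\x01", 7));
  return 0;
}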
#undef BENCHMARK_NUM TEST(String, EncodeDecode) { random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); for (int len = 0; len < 256; len++) { const string a = RandomString(&rnd, len); TestWriteRead(a); for (int len2 = 0; len2 < 64; len2++) { const string b = RandomString(&rnd, len2); TestWriteAppends(a, b); string out; OCWriteToString<string>(&out, a); OCWriteToString<string>(&out, b); string a2, b2, dummy; StringPiece s = out; StringPiece s2 = out; CHECK(OCRead<string>(&s, &a2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(OCRead<string>(&s, &b2)); CHECK(OCRead<string>(&s2, nullptr)); CHECK_EQ(s, s2); CHECK(!OCRead<string>(&s, &dummy)); CHECK(!OCRead<string>(&s2, nullptr)); CHECK_EQ(a, a2); CHECK_EQ(b, b2); CHECK(s.empty()); CHECK(s2.empty()); } } } TEST(EncodingIsExpected, String) { std::vector<std::pair<string, string>> data = { {"", string("\x00\x01", 2)}, {"foo", string("foo\x00\x01", 5)}, {"hello", string("hello\x00\x01", 7)}, {string("\x00\x01\xff", 3), string("\x00\xff\x01\xff\x00\x00\x01", 7)}, }; for (const auto& t : data) { string result; OrderedCode::WriteString(&result, t.first); EXPECT_EQ(t.second, result); StringPiece in = result; string decoded; EXPECT_TRUE(OrderedCode::ReadString(&in, &decoded)); EXPECT_EQ(t.first, decoded); EXPECT_EQ("", in); } }
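A hedged sketch of the scheme the EncodingIsExpected.Unsigned table exhibits: one byte holding the count of significant bytes, followed by those bytes most-significant first, so lexicographic order of encodings matches numeric order. The function name is illustrative only; the library's WriteNumIncreasing may differ in detail.

#include <cassert>
#include <cstdint>
#include <string>

// Illustrative version of the unsigned ordered encoding implied by the
// table: a length byte (0..8) followed by the value's significant bytes,
// most significant first. Shorter encodings sort before longer ones, so
// memcmp order equals numeric order.
std::string EncodeNumIncreasing(uint64_t v) {
  char buf[9];
  int len = 0;
  for (uint64_t x = v; x != 0; x >>= 8) ++len;  // count significant bytes
  buf[0] = static_cast<char>(len);
  for (int i = len; i >= 1; --i) {  // fill payload back-to-front
    buf[i] = static_cast<char>(v & 0xff);
    v >>= 8;
  }
  return std::string(buf, 1 + len);
}

int main() {
  assert(EncodeNumIncreasing(0) == std::string("\x00", 1));         // "\000"
  assert(EncodeNumIncreasing(0x100) == std::string("\x02\x01\x00", 3));
  assert(EncodeNumIncreasing(0xff) < EncodeNumIncreasing(0x100));   // ordered
  return 0;
}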
#include "tsl/profiler/backends/cpu/traceme_recorder.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <atomic> #include <deque> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "tsl/platform/env.h" #include "tsl/platform/logging.h" #include "tsl/platform/macros.h" #include "tsl/platform/types.h" #include "tsl/profiler/utils/lock_free_queue.h" #include "tsl/profiler/utils/per_thread.h" namespace tsl { namespace profiler { namespace internal { #ifdef _WIN32 #define DECL_DLL_EXPORT __declspec(dllexport) #else #define DECL_DLL_EXPORT #endif DECL_DLL_EXPORT std::atomic<int> g_trace_level( TraceMeRecorder::kTracingDisabled); static_assert(ATOMIC_INT_LOCK_FREE == 2, "Assumed atomic<int> was lock free"); } namespace { class SplitEventTracker { public: void AddStart(TraceMeRecorder::Event&& event) { DCHECK(event.IsStart()); start_events_.emplace(event.ActivityId(), std::move(event)); } void AddEnd(TraceMeRecorder::Event* event) { DCHECK(event->IsEnd()); if (!FindStartAndMerge(event)) { end_events_.push_back(event); } } void HandleCrossThreadEvents() { for (auto* event : end_events_) { FindStartAndMerge(event); } } private: bool FindStartAndMerge(TraceMeRecorder::Event* event) { auto iter = start_events_.find(event->ActivityId()); if (iter == start_events_.end()) return false; auto& start_event = iter->second; event->name = std::move(start_event.name); event->start_time = start_event.start_time; start_events_.erase(iter); return true; } absl::flat_hash_map<int64_t, TraceMeRecorder::Event> start_events_; std::vector<TraceMeRecorder::Event*> end_events_; }; class ThreadLocalRecorder { public: ThreadLocalRecorder() { auto* env = Env::Default(); info_.tid = env->GetCurrentThreadId(); env->GetCurrentThreadName(&info_.name); } const TraceMeRecorder::ThreadInfo& Info() const { return info_; } void Record(TraceMeRecorder::Event&& event) { queue_.Push(std::move(event)); } void Clear() { queue_.Clear(); } TF_MUST_USE_RESULT std::deque<TraceMeRecorder::Event> Consume( SplitEventTracker* split_event_tracker) { std::deque<TraceMeRecorder::Event> events; std::optional<TraceMeRecorder::Event> event; while ((event = queue_.Pop())) { if (event->IsStart()) { split_event_tracker->AddStart(*std::move(event)); continue; } events.push_back(*std::move(event)); if (events.back().IsEnd()) { split_event_tracker->AddEnd(&events.back()); } } return events; } private: TraceMeRecorder::ThreadInfo info_; LockFreeQueue<TraceMeRecorder::Event> queue_; }; } void TraceMeRecorder::Clear() { auto recorders = PerThread<ThreadLocalRecorder>::StartRecording(); for (auto& recorder : recorders) { recorder->Clear(); }; } TraceMeRecorder::Events TraceMeRecorder::Consume() { TraceMeRecorder::Events result; SplitEventTracker split_event_tracker; auto recorders = PerThread<ThreadLocalRecorder>::StopRecording(); for (auto& recorder : recorders) { auto events = recorder->Consume(&split_event_tracker); if (!events.empty()) { result.push_back({recorder->Info(), std::move(events)}); } }; split_event_tracker.HandleCrossThreadEvents(); return result; } bool TraceMeRecorder::Start(int level) { level = std::max(0, level); int expected = kTracingDisabled; bool started = internal::g_trace_level.compare_exchange_strong( expected, level, std::memory_order_acq_rel); if (started) { Clear(); } return started; } void TraceMeRecorder::Record(Event&& event) { PerThread<ThreadLocalRecorder>::Get().Record(std::move(event)); } TraceMeRecorder::Events TraceMeRecorder::Stop() { 
TraceMeRecorder::Events events; if (internal::g_trace_level.exchange( kTracingDisabled, std::memory_order_acq_rel) != kTracingDisabled) { events = Consume(); } return events; } int64_t TraceMeRecorder::NewActivityId() { static std::atomic<int32> thread_counter(1); const thread_local static int32_t thread_id = thread_counter.fetch_add(1, std::memory_order_relaxed); thread_local static uint32 per_thread_activity_id = 0; return static_cast<int64_t>(thread_id) << 32 | per_thread_activity_id++; } } }
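NewActivityId above packs a per-process thread ordinal into the upper 32 bits and a per-thread counter into the lower 32, so IDs are unique without cross-thread synchronization. A small hypothetical demonstration of that packing (not the library's API) follows.

#include <cassert>
#include <cstdint>

// Illustrative packing mirroring TraceMeRecorder::NewActivityId: upper
// 32 bits identify the thread, lower 32 bits count events on it.
int64_t PackActivityId(int32_t thread_id, uint32_t per_thread_seq) {
  return static_cast<int64_t>(thread_id) << 32 | per_thread_seq;
}

int main() {
  int64_t id = PackActivityId(/*thread_id=*/7, /*per_thread_seq=*/42);
  assert((id >> 32) == 7);          // recover the thread ordinal
  assert((id & 0xffffffff) == 42);  // recover the sequence number
  return 0;
}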
#include "tsl/profiler/backends/cpu/traceme_recorder.h" #include <atomic> #include <set> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "tsl/platform/env.h" #include "tsl/platform/logging.h" #include "tsl/platform/notification.h" #include "tsl/platform/test.h" #include "tsl/platform/threadpool.h" #include "tsl/platform/types.h" #include "tsl/profiler/utils/math_utils.h" #include "tsl/profiler/utils/time_utils.h" namespace tsl { namespace profiler { namespace { using ::testing::ElementsAre; MATCHER_P(Named, name, "") { return arg.name == name; } TEST(RecorderTest, SingleThreaded) { int64_t start_time = GetCurrentTimeNanos(); int64_t end_time = start_time + UniToNano(1); TraceMeRecorder::Record({"before", start_time, end_time}); TraceMeRecorder::Start(1); TraceMeRecorder::Record({"during1", start_time, end_time}); TraceMeRecorder::Record({"during2", start_time, end_time}); auto results = TraceMeRecorder::Stop(); TraceMeRecorder::Record({"after", start_time, end_time}); ASSERT_EQ(results.size(), 1); EXPECT_THAT(results[0].events, ElementsAre(Named("during1"), Named("during2"))); } TEST(RecorderTest, Multithreaded) { constexpr static int kNumThreads = 4; tsl::Notification start; tsl::Notification stop; thread::ThreadPool pool(tsl::Env::Default(), "testpool", kNumThreads); std::atomic<int> thread_count = {0}; for (int i = 0; i < kNumThreads; i++) { pool.Schedule([&start, &stop, &thread_count] { uint64 j = 0; bool was_active = false; auto record_event = [&j]() { int64_t start_time = GetCurrentTimeNanos(); int64_t end_time = start_time + UniToNano(1); TraceMeRecorder::Record( {absl::StrCat(j++), start_time, end_time}); }; thread_count.fetch_add(1, std::memory_order_relaxed); start.WaitForNotification(); while (!stop.HasBeenNotified()) { if (TraceMeRecorder::Active()) { record_event(); was_active = true; } if (was_active && !TraceMeRecorder::Active()) { record_event(); record_event(); was_active = false; } SpinForNanos(10); } }); } struct ThreadState { bool split_session = false; bool overlapping_sessions = false; std::set<uint64> events; }; absl::flat_hash_map<uint32 , ThreadState> thread_state; auto done = [&thread_state] { for (const auto& id_and_thread : thread_state) { auto& t = id_and_thread.second; if (t.events.size() < 2) return false; } return true; }; while (thread_count.load(std::memory_order_relaxed) < kNumThreads) { LOG(INFO) << "Waiting for all threads to spin up..."; SleepForMillis(1); } start.Notify(); constexpr static int kMaxIters = 100; for (int iters = 0; iters < kMaxIters && !done(); ++iters) { LOG(INFO) << "Looping until convergence, iteration: " << iters; TraceMeRecorder::Start(1); SleepForMillis(100); auto results = TraceMeRecorder::Stop(); for (const auto& thread : results) { if (thread.events.empty()) continue; auto& state = thread_state[thread.thread.tid]; std::set<uint64> session_events; uint64 current = 0; for (const auto& event : thread.events) { uint64 activity_id; ASSERT_TRUE(absl::SimpleAtoi(event.name, &activity_id)); session_events.emplace(activity_id); if (current != 0 && activity_id != current + 1) { state.split_session = true; } current = activity_id; } for (const auto& event : session_events) { auto result = state.events.emplace(event); if (!result.second) { state.overlapping_sessions = true; } } } SleepForMillis(1); } stop.Notify(); for (const auto& id_and_thread : thread_state) { auto& thread = id_and_thread.second; 
EXPECT_FALSE(thread.split_session) << "Expected contiguous events in a session"; EXPECT_FALSE(thread.overlapping_sessions) << "Expected disjoint sessions"; EXPECT_GT(thread.events.size(), 1) << "Expected gaps in thread events between sessions"; } } } } }
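A minimal session sketch distilled from the SingleThreaded test above: events recorded outside a Start/Stop window are dropped, and events inside it come back from Stop. It assumes the same tsl::profiler headers the test includes.

#include "tsl/profiler/backends/cpu/traceme_recorder.h"
#include "tsl/profiler/utils/time_utils.h"

namespace tsl {
namespace profiler {

// Sketch of the recorder lifecycle the tests exercise: only events
// recorded between Start() and Stop() appear in the returned sessions.
TraceMeRecorder::Events RecordOneSession() {
  TraceMeRecorder::Start(/*level=*/1);
  int64_t now = GetCurrentTimeNanos();
  TraceMeRecorder::Record({"my_event", /*start_time=*/now,
                           /*end_time=*/now + 1000});
  return TraceMeRecorder::Stop();  // one entry per thread that recorded
}

}  // namespace profiler
}  // namespace tsl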
TraceMeRecorder::Events TraceMeRecorder::Consume() {
  TraceMeRecorder::Events result;
  SplitEventTracker split_event_tracker;
  auto recorders = PerThread<ThreadLocalRecorder>::StopRecording();
  for (auto& recorder : recorders) {
    auto events = recorder->Consume(&split_event_tracker);
    if (!events.empty()) {
      result.push_back({recorder->Info(), std::move(events)});
    }
  }
  split_event_tracker.HandleCrossThreadEvents();
  return result;
}
TEST(RecorderTest, SingleThreaded) { int64_t start_time = GetCurrentTimeNanos(); int64_t end_time = start_time + UniToNano(1); TraceMeRecorder::Record({"before", start_time, end_time}); TraceMeRecorder::Start(1); TraceMeRecorder::Record({"during1", start_time, end_time}); TraceMeRecorder::Record({"during2", start_time, end_time}); auto results = TraceMeRecorder::Stop(); TraceMeRecorder::Record({"after", start_time, end_time}); ASSERT_EQ(results.size(), 1); EXPECT_THAT(results[0].events, ElementsAre(Named("during1"), Named("during2"))); } TEST(RecorderTest, Multithreaded) { constexpr static int kNumThreads = 4; tsl::Notification start; tsl::Notification stop; thread::ThreadPool pool(tsl::Env::Default(), "testpool", kNumThreads); std::atomic<int> thread_count = {0}; for (int i = 0; i < kNumThreads; i++) { pool.Schedule([&start, &stop, &thread_count] { uint64 j = 0; bool was_active = false; auto record_event = [&j]() { int64_t start_time = GetCurrentTimeNanos(); int64_t end_time = start_time + UniToNano(1); TraceMeRecorder::Record( {absl::StrCat(j++), start_time, end_time}); }; thread_count.fetch_add(1, std::memory_order_relaxed); start.WaitForNotification(); while (!stop.HasBeenNotified()) { if (TraceMeRecorder::Active()) { record_event(); was_active = true; } if (was_active && !TraceMeRecorder::Active()) { record_event(); record_event(); was_active = false; } SpinForNanos(10); } }); } struct ThreadState { bool split_session = false; bool overlapping_sessions = false; std::set<uint64> events; }; absl::flat_hash_map<uint32 , ThreadState> thread_state; auto done = [&thread_state] { for (const auto& id_and_thread : thread_state) { auto& t = id_and_thread.second; if (t.events.size() < 2) return false; } return true; }; while (thread_count.load(std::memory_order_relaxed) < kNumThreads) { LOG(INFO) << "Waiting for all threads to spin up..."; SleepForMillis(1); } start.Notify(); constexpr static int kMaxIters = 100; for (int iters = 0; iters < kMaxIters && !done(); ++iters) { LOG(INFO) << "Looping until convergence, iteration: " << iters; TraceMeRecorder::Start(1); SleepForMillis(100); auto results = TraceMeRecorder::Stop(); for (const auto& thread : results) { if (thread.events.empty()) continue; auto& state = thread_state[thread.thread.tid]; std::set<uint64> session_events; uint64 current = 0; for (const auto& event : thread.events) { uint64 activity_id; ASSERT_TRUE(absl::SimpleAtoi(event.name, &activity_id)); session_events.emplace(activity_id); if (current != 0 && activity_id != current + 1) { state.split_session = true; } current = activity_id; } for (const auto& event : session_events) { auto result = state.events.emplace(event); if (!result.second) { state.overlapping_sessions = true; } } } SleepForMillis(1); } stop.Notify(); for (const auto& id_and_thread : thread_state) { auto& thread = id_and_thread.second; EXPECT_FALSE(thread.split_session) << "Expected contiguous events in a session"; EXPECT_FALSE(thread.overlapping_sessions) << "Expected disjoint sessions"; EXPECT_GT(thread.events.size(), 1) << "Expected gaps in thread events between sessions"; } }
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h" #include <string> #include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/tools/command_line_flags.h" #include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h" #include "tensorflow/lite/tools/evaluation/utils.h" #include "tensorflow/lite/tools/logging.h" #include "tensorflow/lite/tools/tool_params.h" namespace tflite { namespace evaluation { namespace { constexpr char kNnapiDelegate[] = "nnapi"; constexpr char kGpuDelegate[] = "gpu"; constexpr char kHexagonDelegate[] = "hexagon"; constexpr char kXnnpackDelegate[] = "xnnpack"; constexpr char kCoremlDelegate[] = "coreml"; } TfliteInferenceParams::Delegate ParseStringToDelegateType( const std::string& val) { if (val == kNnapiDelegate) return TfliteInferenceParams::NNAPI; if (val == kGpuDelegate) return TfliteInferenceParams::GPU; if (val == kHexagonDelegate) return TfliteInferenceParams::HEXAGON; if (val == kXnnpackDelegate) return TfliteInferenceParams::XNNPACK; if (val == kCoremlDelegate) return TfliteInferenceParams::COREML; return TfliteInferenceParams::NONE; } TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params, std::string* error_msg) { const auto type = params.delegate(); switch (type) { case TfliteInferenceParams::NNAPI: { auto p = CreateNNAPIDelegate(); if (!p && error_msg) *error_msg = "NNAPI not supported"; return p; } case TfliteInferenceParams::GPU: { auto p = CreateGPUDelegate(); if (!p && error_msg) *error_msg = "GPU delegate not supported."; return p; } case TfliteInferenceParams::HEXAGON: { auto p = CreateHexagonDelegate("", false); if (!p && error_msg) { *error_msg = "Hexagon delegate is not supported on the platform or required " "libraries are missing."; } return p; } case TfliteInferenceParams::XNNPACK: { auto p = CreateXNNPACKDelegate(params.num_threads(), false); if (!p && error_msg) *error_msg = "XNNPACK delegate not supported."; return p; } case TfliteInferenceParams::COREML: { auto p = CreateCoreMlDelegate(); if (!p && error_msg) *error_msg = "CoreML delegate not supported."; return p; } case TfliteInferenceParams::NONE: return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {}); default: if (error_msg) { *error_msg = "Creation of delegate type: " + TfliteInferenceParams::Delegate_Name(type) + " not supported yet."; } return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {}); } } DelegateProviders::DelegateProviders() : delegate_list_util_(&params_), delegates_map_([=]() -> std::unordered_map<std::string, int> { std::unordered_map<std::string, int> delegates_map; const auto& providers = delegate_list_util_.providers(); for (int i = 0; i < providers.size(); ++i) { delegates_map[providers[i]->GetName()] = i; } return delegates_map; }()) { delegate_list_util_.AddAllDelegateParams(); } std::vector<Flag> DelegateProviders::GetFlags() { std::vector<Flag> flags; delegate_list_util_.AppendCmdlineFlags(flags); return flags; } bool DelegateProviders::InitFromCmdlineArgs(int* argc, const char** argv) { std::vector<Flag> flags = GetFlags(); bool parse_result = Flags::Parse(argc, argv, flags); if (!parse_result || params_.Get<bool>("help")) { std::string usage = Flags::Usage(argv[0], flags); TFLITE_LOG(ERROR) << usage; parse_result = false; } return parse_result; } TfLiteDelegatePtr DelegateProviders::CreateDelegate( const std::string& name) const { const auto it = delegates_map_.find(name); if (it == delegates_map_.end()) { return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {}); } const auto& providers 
= delegate_list_util_.providers(); return providers[it->second]->CreateTfLiteDelegate(params_); } tools::ToolParams DelegateProviders::GetAllParams( const TfliteInferenceParams& params) const { tools::ToolParams tool_params; tool_params.Merge(params_, false); if (params.has_num_threads()) { tool_params.Set<int32_t>("num_threads", params.num_threads()); } const auto type = params.delegate(); switch (type) { case TfliteInferenceParams::NNAPI: if (tool_params.HasParam("use_nnapi")) { tool_params.Set<bool>("use_nnapi", true); } break; case TfliteInferenceParams::GPU: if (tool_params.HasParam("use_gpu")) { tool_params.Set<bool>("use_gpu", true); } break; case TfliteInferenceParams::HEXAGON: if (tool_params.HasParam("use_hexagon")) { tool_params.Set<bool>("use_hexagon", true); } break; case TfliteInferenceParams::XNNPACK: if (tool_params.HasParam("use_xnnpack")) { tool_params.Set<bool>("use_xnnpack", true); } if (tool_params.HasParam("xnnpack_force_fp16")) { tool_params.Set<bool>("xnnpack_force_fp16", true); } break; case TfliteInferenceParams::COREML: if (tool_params.HasParam("use_coreml")) { tool_params.Set<bool>("use_coreml", true); } break; default: break; } return tool_params; } } }
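A hedged usage sketch combining ParseStringToDelegateType and CreateTfLiteDelegate from this file; the helper name is hypothetical, and the error handling mirrors the functions' contracts (null delegate plus optional message on failure).

#include <string>
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/logging.h"

namespace tflite {
namespace evaluation {

// Sketch: map a user-supplied name to a delegate enum, then try to
// create it, logging the message filled in on failure.
TfLiteDelegatePtr MakeDelegateOrLog(const std::string& name) {
  TfliteInferenceParams params;
  params.set_delegate(ParseStringToDelegateType(name));  // NONE if unknown
  std::string error;
  TfLiteDelegatePtr delegate = CreateTfLiteDelegate(params, &error);
  if (!delegate && !error.empty()) {
    TFLITE_LOG(ERROR) << "Delegate '" << name << "' unavailable: " << error;
  }
  return delegate;
}

}  // namespace evaluation
}  // namespace tflite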
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h" #include <gtest/gtest.h> #include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h" #include "tensorflow/lite/tools/tool_params.h" namespace tflite { namespace evaluation { namespace { TEST(EvaluationDelegateProviderTest, ParseStringToDelegateType) { EXPECT_EQ(TfliteInferenceParams::NNAPI, ParseStringToDelegateType("nnapi")); EXPECT_EQ(TfliteInferenceParams::GPU, ParseStringToDelegateType("gpu")); EXPECT_EQ(TfliteInferenceParams::HEXAGON, ParseStringToDelegateType("hexagon")); EXPECT_EQ(TfliteInferenceParams::XNNPACK, ParseStringToDelegateType("xnnpack")); EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Gpu")); EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Testing")); } TEST(EvaluationDelegateProviderTest, CreateTfLiteDelegate) { TfliteInferenceParams params; params.set_delegate(TfliteInferenceParams::NONE); EXPECT_TRUE(!CreateTfLiteDelegate(params)); } TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) { DelegateProviders providers; const auto& params = providers.GetAllParams(); EXPECT_TRUE(params.HasParam("use_nnapi")); EXPECT_TRUE(params.HasParam("use_gpu")); int argc = 3; const char* argv[] = {"program_name", "--use_gpu=true", "--other_undefined_flag=1"}; EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv)); EXPECT_TRUE(params.Get<bool>("use_gpu")); EXPECT_EQ(2, argc); EXPECT_EQ("--other_undefined_flag=1", argv[1]); } TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) { DelegateProviders providers; int argc = 2; const char* argv[] = {"program_name", "--num_threads=1"}; EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv)); const auto& default_params = providers.GetAllParams(); EXPECT_EQ(1, default_params.Get<int>("num_threads")); TfliteInferenceParams params; params.set_delegate(TfliteInferenceParams::NONE); params.set_num_threads(4); tools::ToolParams tool_params = providers.GetAllParams(params); EXPECT_EQ(4, tool_params.Get<int>("num_threads")); EXPECT_EQ(1, argc); } } } }
tools::ToolParams DelegateProviders::GetAllParams(
    const TfliteInferenceParams& params) const {
  tools::ToolParams tool_params;
  tool_params.Merge(params_, false);
  if (params.has_num_threads()) {
    tool_params.Set<int32_t>("num_threads", params.num_threads());
  }
  const auto type = params.delegate();
  switch (type) {
    case TfliteInferenceParams::NNAPI:
      if (tool_params.HasParam("use_nnapi")) {
        tool_params.Set<bool>("use_nnapi", true);
      }
      break;
    case TfliteInferenceParams::GPU:
      if (tool_params.HasParam("use_gpu")) {
        tool_params.Set<bool>("use_gpu", true);
      }
      break;
    case TfliteInferenceParams::HEXAGON:
      if (tool_params.HasParam("use_hexagon")) {
        tool_params.Set<bool>("use_hexagon", true);
      }
      break;
    case TfliteInferenceParams::XNNPACK:
      if (tool_params.HasParam("use_xnnpack")) {
        tool_params.Set<bool>("use_xnnpack", true);
      }
      if (tool_params.HasParam("xnnpack_force_fp16")) {
        tool_params.Set<bool>("xnnpack_force_fp16", true);
      }
      break;
    case TfliteInferenceParams::COREML:
      if (tool_params.HasParam("use_coreml")) {
        tool_params.Set<bool>("use_coreml", true);
      }
      break;
    default:
      break;
  }
  return tool_params;
}
TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) { DelegateProviders providers; const auto& params = providers.GetAllParams(); EXPECT_TRUE(params.HasParam("use_nnapi")); EXPECT_TRUE(params.HasParam("use_gpu")); int argc = 3; const char* argv[] = {"program_name", "--use_gpu=true", "--other_undefined_flag=1"}; EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv)); EXPECT_TRUE(params.Get<bool>("use_gpu")); EXPECT_EQ(2, argc); EXPECT_EQ("--other_undefined_flag=1", argv[1]); } TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) { DelegateProviders providers; int argc = 2; const char* argv[] = {"program_name", "--num_threads=1"}; EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv)); const auto& default_params = providers.GetAllParams(); EXPECT_EQ(1, default_params.Get<int>("num_threads")); TfliteInferenceParams params; params.set_delegate(TfliteInferenceParams::NONE); params.set_num_threads(4); tools::ToolParams tool_params = providers.GetAllParams(params); EXPECT_EQ(4, tool_params.Get<int>("num_threads")); EXPECT_EQ(1, argc); }
#include "quiche/quic/core/quic_flow_controller.h" #include <algorithm> #include <cstdint> #include <string> #include "absl/strings/str_cat.h" #include "quiche/quic/core/quic_connection.h" #include "quiche/quic/core/quic_packets.h" #include "quiche/quic/core/quic_session.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" namespace quic { #define ENDPOINT \ (perspective_ == Perspective::IS_SERVER ? "Server: " : "Client: ") std::string QuicFlowController::LogLabel() { if (is_connection_flow_controller_) { return "connection"; } return absl::StrCat("stream ", id_); } QuicFlowController::QuicFlowController( QuicSession* session, QuicStreamId id, bool is_connection_flow_controller, QuicStreamOffset send_window_offset, QuicStreamOffset receive_window_offset, QuicByteCount receive_window_size_limit, bool should_auto_tune_receive_window, QuicFlowControllerInterface* session_flow_controller) : session_(session), connection_(session->connection()), id_(id), is_connection_flow_controller_(is_connection_flow_controller), perspective_(session->perspective()), bytes_sent_(0), send_window_offset_(send_window_offset), bytes_consumed_(0), highest_received_byte_offset_(0), receive_window_offset_(receive_window_offset), receive_window_size_(receive_window_offset), receive_window_size_limit_(receive_window_size_limit), auto_tune_receive_window_(should_auto_tune_receive_window), session_flow_controller_(session_flow_controller), last_blocked_send_window_offset_(0), prev_window_update_time_(QuicTime::Zero()) { QUICHE_DCHECK_LE(receive_window_size_, receive_window_size_limit_); QUICHE_DCHECK_EQ( is_connection_flow_controller_, QuicUtils::GetInvalidStreamId(session_->transport_version()) == id_); QUIC_DVLOG(1) << ENDPOINT << "Created flow controller for " << LogLabel() << ", setting initial receive window offset to: " << receive_window_offset_ << ", max receive window to: " << receive_window_size_ << ", max receive window limit to: " << receive_window_size_limit_ << ", setting send window offset to: " << send_window_offset_; } void QuicFlowController::AddBytesConsumed(QuicByteCount bytes_consumed) { bytes_consumed_ += bytes_consumed; QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " consumed " << bytes_consumed_ << " bytes."; MaybeSendWindowUpdate(); } bool QuicFlowController::UpdateHighestReceivedOffset( QuicStreamOffset new_offset) { if (new_offset <= highest_received_byte_offset_) { return false; } QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " highest byte offset increased from " << highest_received_byte_offset_ << " to " << new_offset; highest_received_byte_offset_ = new_offset; return true; } void QuicFlowController::AddBytesSent(QuicByteCount bytes_sent) { if (bytes_sent_ + bytes_sent > send_window_offset_) { QUIC_BUG(quic_bug_10836_1) << ENDPOINT << LogLabel() << " Trying to send an extra " << bytes_sent << " bytes, when bytes_sent = " << bytes_sent_ << ", and send_window_offset_ = " << send_window_offset_; bytes_sent_ = send_window_offset_; connection_->CloseConnection( QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA, absl::StrCat(send_window_offset_ - (bytes_sent_ + bytes_sent), "bytes over send window offset"), ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); return; } bytes_sent_ += bytes_sent; QUIC_DVLOG(1) << ENDPOINT << LogLabel() << " sent " << bytes_sent_ << " bytes."; } bool 
QuicFlowController::FlowControlViolation() { if (highest_received_byte_offset_ > receive_window_offset_) { QUIC_DLOG(INFO) << ENDPOINT << "Flow control violation on " << LogLabel() << ", receive window offset: " << receive_window_offset_ << ", highest received byte offset: " << highest_received_byte_offset_; return true; } return false; } void QuicFlowController::MaybeIncreaseMaxWindowSize() { QuicTime now = connection_->clock()->ApproximateNow(); QuicTime prev = prev_window_update_time_; prev_window_update_time_ = now; if (!prev.IsInitialized()) { QUIC_DVLOG(1) << ENDPOINT << "first window update for " << LogLabel(); return; } if (!auto_tune_receive_window_) { return; } QuicTime::Delta rtt = connection_->sent_packet_manager().GetRttStats()->smoothed_rtt(); if (rtt.IsZero()) { QUIC_DVLOG(1) << ENDPOINT << "rtt zero for " << LogLabel(); return; } QuicTime::Delta since_last = now - prev; QuicTime::Delta two_rtt = 2 * rtt; if (since_last >= two_rtt) { return; } QuicByteCount old_window = receive_window_size_; IncreaseWindowSize(); if (receive_window_size_ > old_window) { QUIC_DVLOG(1) << ENDPOINT << "New max window increase for " << LogLabel() << " after " << since_last.ToMicroseconds() << " us, and RTT is " << rtt.ToMicroseconds() << "us. max wndw: " << receive_window_size_; if (session_flow_controller_ != nullptr) { session_flow_controller_->EnsureWindowAtLeast( kSessionFlowControlMultiplier * receive_window_size_); } } else { QUIC_LOG_FIRST_N(INFO, 1) << ENDPOINT << "Max window at limit for " << LogLabel() << " after " << since_last.ToMicroseconds() << " us, and RTT is " << rtt.ToMicroseconds() << "us. Limit size: " << receive_window_size_; } } void QuicFlowController::IncreaseWindowSize() { receive_window_size_ *= 2; receive_window_size_ = std::min(receive_window_size_, receive_window_size_limit_); } QuicByteCount QuicFlowController::WindowUpdateThreshold() { return receive_window_size_ / 2; } void QuicFlowController::MaybeSendWindowUpdate() { if (!session_->connection()->connected()) { return; } QUICHE_DCHECK_LE(bytes_consumed_, receive_window_offset_); QuicStreamOffset available_window = receive_window_offset_ - bytes_consumed_; QuicByteCount threshold = WindowUpdateThreshold(); if (!prev_window_update_time_.IsInitialized()) { prev_window_update_time_ = connection_->clock()->ApproximateNow(); } if (available_window >= threshold) { QUIC_DVLOG(1) << ENDPOINT << "Not sending WindowUpdate for " << LogLabel() << ", available window: " << available_window << " >= threshold: " << threshold; return; } MaybeIncreaseMaxWindowSize(); UpdateReceiveWindowOffsetAndSendWindowUpdate(available_window); } void QuicFlowController::UpdateReceiveWindowOffsetAndSendWindowUpdate( QuicStreamOffset available_window) { receive_window_offset_ += (receive_window_size_ - available_window); QUIC_DVLOG(1) << ENDPOINT << "Sending WindowUpdate frame for " << LogLabel() << ", consumed bytes: " << bytes_consumed_ << ", available window: " << available_window << ", and threshold: " << WindowUpdateThreshold() << ", and receive window size: " << receive_window_size_ << ". New receive window offset is: " << receive_window_offset_; SendWindowUpdate(); } void QuicFlowController::MaybeSendBlocked() { if (SendWindowSize() != 0 || last_blocked_send_window_offset_ >= send_window_offset_) { return; } QUIC_DLOG(INFO) << ENDPOINT << LogLabel() << " is flow control blocked. 
" << "Send window: " << SendWindowSize() << ", bytes sent: " << bytes_sent_ << ", send limit: " << send_window_offset_; last_blocked_send_window_offset_ = send_window_offset_; session_->SendBlocked(id_, last_blocked_send_window_offset_); } bool QuicFlowController::UpdateSendWindowOffset( QuicStreamOffset new_send_window_offset) { if (new_send_window_offset <= send_window_offset_) { return false; } QUIC_DVLOG(1) << ENDPOINT << "UpdateSendWindowOffset for " << LogLabel() << " with new offset " << new_send_window_offset << " current offset: " << send_window_offset_ << " bytes_sent: " << bytes_sent_; const bool was_previously_blocked = IsBlocked(); send_window_offset_ = new_send_window_offset; return was_previously_blocked; } void QuicFlowController::EnsureWindowAtLeast(QuicByteCount window_size) { if (receive_window_size_limit_ >= window_size) { return; } QuicStreamOffset available_window = receive_window_offset_ - bytes_consumed_; IncreaseWindowSize(); UpdateReceiveWindowOffsetAndSendWindowUpdate(available_window); } bool QuicFlowController::IsBlocked() const { return SendWindowSize() == 0; } uint64_t QuicFlowController::SendWindowSize() const { if (bytes_sent_ > send_window_offset_) { return 0; } return send_window_offset_ - bytes_sent_; } void QuicFlowController::UpdateReceiveWindowSize(QuicStreamOffset size) { QUICHE_DCHECK_LE(size, receive_window_size_limit_); QUIC_DVLOG(1) << ENDPOINT << "UpdateReceiveWindowSize for " << LogLabel() << ": " << size; if (receive_window_size_ != receive_window_offset_) { QUIC_BUG(quic_bug_10836_2) << "receive_window_size_:" << receive_window_size_ << " != receive_window_offset:" << receive_window_offset_; return; } receive_window_size_ = size; receive_window_offset_ = size; } void QuicFlowController::SendWindowUpdate() { QuicStreamId id = id_; if (is_connection_flow_controller_) { id = QuicUtils::GetInvalidStreamId(connection_->transport_version()); } session_->SendWindowUpdate(id, receive_window_offset_); } }
#include "quiche/quic/core/quic_flow_controller.h" #include <memory> #include <utility> #include "absl/strings/str_cat.h" #include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_flow_controller_peer.h" #include "quiche/quic/test_tools/quic_sent_packet_manager_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" using testing::_; using testing::Invoke; using testing::StrictMock; namespace quic { namespace test { const int64_t kRtt = 100; class MockFlowController : public QuicFlowControllerInterface { public: MockFlowController() {} MockFlowController(const MockFlowController&) = delete; MockFlowController& operator=(const MockFlowController&) = delete; ~MockFlowController() override {} MOCK_METHOD(void, EnsureWindowAtLeast, (QuicByteCount), (override)); }; class QuicFlowControllerTest : public QuicTest { public: void Initialize() { connection_ = new MockQuicConnection(&helper_, &alarm_factory_, Perspective::IS_CLIENT); connection_->SetEncrypter( ENCRYPTION_FORWARD_SECURE, std::make_unique<NullEncrypter>(connection_->perspective())); session_ = std::make_unique<StrictMock<MockQuicSession>>(connection_); flow_controller_ = std::make_unique<QuicFlowController>( session_.get(), stream_id_, false, send_window_, receive_window_, kStreamReceiveWindowLimit, should_auto_tune_receive_window_, &session_flow_controller_); } protected: QuicStreamId stream_id_ = 1234; QuicByteCount send_window_ = kInitialSessionFlowControlWindowForTest; QuicByteCount receive_window_ = kInitialSessionFlowControlWindowForTest; std::unique_ptr<QuicFlowController> flow_controller_; MockQuicConnectionHelper helper_; MockAlarmFactory alarm_factory_; MockQuicConnection* connection_; std::unique_ptr<StrictMock<MockQuicSession>> session_; MockFlowController session_flow_controller_; bool should_auto_tune_receive_window_ = false; }; TEST_F(QuicFlowControllerTest, SendingBytes) { Initialize(); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_ / 2); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_ / 2); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1); flow_controller_->MaybeSendBlocked(); EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_)); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); EXPECT_FALSE(flow_controller_->UpdateSendWindowOffset(send_window_ / 10)); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); EXPECT_QUIC_BUG( { EXPECT_CALL( *connection_, CloseConnection(QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA, _, _)); flow_controller_->AddBytesSent(send_window_ * 10); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); }, absl::StrCat("Trying to send an extra ", send_window_ * 10, " bytes")); } TEST_F(QuicFlowControllerTest, ReceivingBytes) { Initialize(); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, 
QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); EXPECT_TRUE( flow_controller_->UpdateHighestReceivedOffset(1 + receive_window_ / 2)); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ((receive_window_ / 2) - 1, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1); flow_controller_->AddBytesConsumed(1 + receive_window_ / 2); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); } TEST_F(QuicFlowControllerTest, Move) { Initialize(); flow_controller_->AddBytesSent(send_window_ / 2); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize()); EXPECT_TRUE( flow_controller_->UpdateHighestReceivedOffset(1 + receive_window_ / 2)); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ((receive_window_ / 2) - 1, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); QuicFlowController flow_controller2(std::move(*flow_controller_)); EXPECT_EQ(send_window_ / 2, flow_controller2.SendWindowSize()); EXPECT_FALSE(flow_controller2.FlowControlViolation()); EXPECT_EQ((receive_window_ / 2) - 1, QuicFlowControllerPeer::ReceiveWindowSize(&flow_controller2)); } TEST_F(QuicFlowControllerTest, OnlySendBlockedFrameOncePerOffset) { Initialize(); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1); flow_controller_->MaybeSendBlocked(); EXPECT_CALL(*session_, SendBlocked(_, _)).Times(0); flow_controller_->MaybeSendBlocked(); flow_controller_->MaybeSendBlocked(); flow_controller_->MaybeSendBlocked(); flow_controller_->MaybeSendBlocked(); flow_controller_->MaybeSendBlocked(); EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_)); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1); flow_controller_->MaybeSendBlocked(); } TEST_F(QuicFlowControllerTest, ReceivingBytesFastIncreasesFlowWindow) { should_auto_tune_receive_window_ = true; Initialize(); EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1); EXPECT_TRUE(flow_controller_->auto_tune_receive_window()); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1)); QuicSentPacketManager* manager = QuicConnectionPeer::GetSentPacketManager(connection_); RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats()); rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt), QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); QuicByteCount threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); QuicStreamOffset receive_offset = threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); 
EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); EXPECT_CALL( session_flow_controller_, EnsureWindowAtLeast(kInitialSessionFlowControlWindowForTest * 2 * 1.5)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(2 * kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt - 1)); receive_offset += threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); QuicByteCount new_threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); EXPECT_GT(new_threshold, threshold); } TEST_F(QuicFlowControllerTest, ReceivingBytesFastNoAutoTune) { Initialize(); EXPECT_CALL(*session_, WriteControlFrame(_, _)) .Times(2) .WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType)); EXPECT_FALSE(flow_controller_->auto_tune_receive_window()); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1)); QuicSentPacketManager* manager = QuicConnectionPeer::GetSentPacketManager(connection_); RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats()); rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt), QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); QuicByteCount threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); QuicStreamOffset receive_offset = threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt - 1)); receive_offset += threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); QuicByteCount new_threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); EXPECT_EQ(new_threshold, threshold); } TEST_F(QuicFlowControllerTest, ReceivingBytesNormalStableFlowWindow) { should_auto_tune_receive_window_ = true; Initialize(); EXPECT_CALL(*session_, WriteControlFrame(_, _)).Times(1); EXPECT_TRUE(flow_controller_->auto_tune_receive_window()); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1)); QuicSentPacketManager* manager = QuicConnectionPeer::GetSentPacketManager(connection_); RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats()); rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt), QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); 
EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); QuicByteCount threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); QuicStreamOffset receive_offset = threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); EXPECT_CALL( session_flow_controller_, EnsureWindowAtLeast(kInitialSessionFlowControlWindowForTest * 2 * 1.5)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(2 * kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt + 1)); receive_offset += threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); QuicByteCount new_threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); EXPECT_EQ(new_threshold, 2 * threshold); } TEST_F(QuicFlowControllerTest, ReceivingBytesNormalNoAutoTune) { Initialize(); EXPECT_CALL(*session_, WriteControlFrame(_, _)) .Times(2) .WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType)); EXPECT_FALSE(flow_controller_->auto_tune_receive_window()); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1)); QuicSentPacketManager* manager = QuicConnectionPeer::GetSentPacketManager(connection_); RttStats* rtt_stats = const_cast<RttStats*>(manager->GetRttStats()); rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kRtt), QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); QuicByteCount threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); QuicStreamOffset receive_offset = threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest - receive_offset, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(kInitialSessionFlowControlWindowForTest, QuicFlowControllerPeer::ReceiveWindowSize(flow_controller_.get())); connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(2 * kRtt + 1)); receive_offset += threshold + 1; EXPECT_TRUE(flow_controller_->UpdateHighestReceivedOffset(receive_offset)); flow_controller_->AddBytesConsumed(threshold + 1); EXPECT_FALSE(flow_controller_->FlowControlViolation()); QuicByteCount new_threshold = QuicFlowControllerPeer::WindowUpdateThreshold(flow_controller_.get()); EXPECT_EQ(new_threshold, threshold); } } }
bool QuicFlowController::UpdateSendWindowOffset(
    QuicStreamOffset new_send_window_offset) {
  if (new_send_window_offset <= send_window_offset_) {
    return false;
  }
  QUIC_DVLOG(1) << ENDPOINT << "UpdateSendWindowOffset for " << LogLabel()
                << " with new offset " << new_send_window_offset
                << " current offset: " << send_window_offset_
                << " bytes_sent: " << bytes_sent_;
  const bool was_previously_blocked = IsBlocked();
  send_window_offset_ = new_send_window_offset;
  return was_previously_blocked;
}
TEST_F(QuicFlowControllerTest, SendingBytes) { Initialize(); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_FALSE(flow_controller_->FlowControlViolation()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_ / 2); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_ / 2, flow_controller_->SendWindowSize()); flow_controller_->AddBytesSent(send_window_ / 2); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1); flow_controller_->MaybeSendBlocked(); EXPECT_TRUE(flow_controller_->UpdateSendWindowOffset(2 * send_window_)); EXPECT_FALSE(flow_controller_->IsBlocked()); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); EXPECT_FALSE(flow_controller_->UpdateSendWindowOffset(send_window_ / 10)); EXPECT_EQ(send_window_, flow_controller_->SendWindowSize()); EXPECT_QUIC_BUG( { EXPECT_CALL( *connection_, CloseConnection(QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA, _, _)); flow_controller_->AddBytesSent(send_window_ * 10); EXPECT_TRUE(flow_controller_->IsBlocked()); EXPECT_EQ(0u, flow_controller_->SendWindowSize()); }, absl::StrCat("Trying to send an extra ", send_window_ * 10, " bytes")); }
#include "quiche/quic/platform/api/quic_socket_address.h" #include <cstring> #include <limits> #include <string> #include "absl/strings/str_cat.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_ip_address.h" #include "quiche/quic/platform/api/quic_ip_address_family.h" namespace quic { namespace { uint32_t HashIP(const QuicIpAddress& ip) { if (ip.IsIPv4()) { return ip.GetIPv4().s_addr; } if (ip.IsIPv6()) { auto v6addr = ip.GetIPv6(); const uint32_t* v6_as_ints = reinterpret_cast<const uint32_t*>(&v6addr.s6_addr); return v6_as_ints[0] ^ v6_as_ints[1] ^ v6_as_ints[2] ^ v6_as_ints[3]; } return 0; } } QuicSocketAddress::QuicSocketAddress(QuicIpAddress address, uint16_t port) : host_(address), port_(port) {} QuicSocketAddress::QuicSocketAddress(const struct sockaddr_storage& saddr) { switch (saddr.ss_family) { case AF_INET: { const sockaddr_in* v4 = reinterpret_cast<const sockaddr_in*>(&saddr); host_ = QuicIpAddress(v4->sin_addr); port_ = ntohs(v4->sin_port); break; } case AF_INET6: { const sockaddr_in6* v6 = reinterpret_cast<const sockaddr_in6*>(&saddr); host_ = QuicIpAddress(v6->sin6_addr); port_ = ntohs(v6->sin6_port); break; } default: QUIC_BUG(quic_bug_10075_1) << "Unknown address family passed: " << saddr.ss_family; break; } } QuicSocketAddress::QuicSocketAddress(const sockaddr* saddr, socklen_t len) { sockaddr_storage storage; static_assert(std::numeric_limits<socklen_t>::max() >= sizeof(storage), "Cannot cast sizeof(storage) to socklen_t as it does not fit"); if (len < static_cast<socklen_t>(sizeof(sockaddr)) || (saddr->sa_family == AF_INET && len < static_cast<socklen_t>(sizeof(sockaddr_in))) || (saddr->sa_family == AF_INET6 && len < static_cast<socklen_t>(sizeof(sockaddr_in6))) || len > static_cast<socklen_t>(sizeof(storage))) { QUIC_BUG(quic_bug_10075_2) << "Socket address of invalid length provided"; return; } memcpy(&storage, saddr, len); *this = QuicSocketAddress(storage); } bool operator==(const QuicSocketAddress& lhs, const QuicSocketAddress& rhs) { return lhs.host_ == rhs.host_ && lhs.port_ == rhs.port_; } bool operator!=(const QuicSocketAddress& lhs, const QuicSocketAddress& rhs) { return !(lhs == rhs); } bool QuicSocketAddress::IsInitialized() const { return host_.IsInitialized(); } std::string QuicSocketAddress::ToString() const { switch (host_.address_family()) { case IpAddressFamily::IP_V4: return absl::StrCat(host_.ToString(), ":", port_); case IpAddressFamily::IP_V6: return absl::StrCat("[", host_.ToString(), "]:", port_); default: return ""; } } int QuicSocketAddress::FromSocket(int fd) { sockaddr_storage addr; socklen_t addr_len = sizeof(addr); int result = getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &addr_len); bool success = result == 0 && addr_len > 0 && static_cast<size_t>(addr_len) <= sizeof(addr); if (success) { *this = QuicSocketAddress(addr); return 0; } return -1; } QuicSocketAddress QuicSocketAddress::Normalized() const { return QuicSocketAddress(host_.Normalized(), port_); } QuicIpAddress QuicSocketAddress::host() const { return host_; } uint16_t QuicSocketAddress::port() const { return port_; } sockaddr_storage QuicSocketAddress::generic_address() const { union { sockaddr_storage storage; sockaddr_in v4; sockaddr_in6 v6; } result; memset(&result.storage, 0, sizeof(result.storage)); switch (host_.address_family()) { case IpAddressFamily::IP_V4: result.v4.sin_family = AF_INET; result.v4.sin_addr = host_.GetIPv4(); result.v4.sin_port = htons(port_); break; case IpAddressFamily::IP_V6: result.v6.sin6_family 
= AF_INET6; result.v6.sin6_addr = host_.GetIPv6(); result.v6.sin6_port = htons(port_); break; default: result.storage.ss_family = AF_UNSPEC; break; } return result.storage; } uint32_t QuicSocketAddress::Hash() const { uint32_t value = 0; value ^= HashIP(host_); value ^= port_ | (port_ << 16); return value; } }
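The Hash() shown above XOR-folds the address words and then mixes the 16-bit port into both halves of the 32-bit result. A standalone restatement of that mixing step, for illustration only (hypothetical helper, not the library's API):

#include <cstdint>
#include <iostream>

// XOR-fold four 32-bit address words, then fold the port into both the low
// and high 16 bits, matching the mixing shown in Hash() above.
uint32_t HashAddressWords(const uint32_t words[4], uint16_t port) {
  uint32_t value = words[0] ^ words[1] ^ words[2] ^ words[3];
  value ^= static_cast<uint32_t>(port) | (static_cast<uint32_t>(port) << 16);
  return value;
}

int main() {
  // ::1 as raw 32-bit words on a little-endian host.
  const uint32_t loopback6[4] = {0, 0, 0, 0x01000000};
  std::cout << std::hex << HashAddressWords(loopback6, 443) << "\n";
}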
#include "quiche/quic/platform/api/quic_socket_address.h" #include <memory> #include <sstream> #include "quiche/quic/platform/api/quic_ip_address.h" #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace { TEST(QuicSocketAddress, Uninitialized) { QuicSocketAddress uninitialized; EXPECT_FALSE(uninitialized.IsInitialized()); } TEST(QuicSocketAddress, ExplicitConstruction) { QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443); QuicSocketAddress ipv6_address(QuicIpAddress::Loopback6(), 443); EXPECT_TRUE(ipv4_address.IsInitialized()); EXPECT_EQ("127.0.0.1:443", ipv4_address.ToString()); EXPECT_EQ("[::1]:443", ipv6_address.ToString()); EXPECT_EQ(QuicIpAddress::Loopback4(), ipv4_address.host()); EXPECT_EQ(QuicIpAddress::Loopback6(), ipv6_address.host()); EXPECT_EQ(443, ipv4_address.port()); } TEST(QuicSocketAddress, OutputToStream) { QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443); std::stringstream stream; stream << ipv4_address; EXPECT_EQ("127.0.0.1:443", stream.str()); } TEST(QuicSocketAddress, FromSockaddrIPv4) { union { sockaddr_storage storage; sockaddr addr; sockaddr_in v4; } address; memset(&address, 0, sizeof(address)); address.v4.sin_family = AF_INET; address.v4.sin_addr = QuicIpAddress::Loopback4().GetIPv4(); address.v4.sin_port = htons(443); EXPECT_EQ("127.0.0.1:443", QuicSocketAddress(&address.addr, sizeof(address.v4)).ToString()); EXPECT_EQ("127.0.0.1:443", QuicSocketAddress(address.storage).ToString()); } TEST(QuicSocketAddress, FromSockaddrIPv6) { union { sockaddr_storage storage; sockaddr addr; sockaddr_in6 v6; } address; memset(&address, 0, sizeof(address)); address.v6.sin6_family = AF_INET6; address.v6.sin6_addr = QuicIpAddress::Loopback6().GetIPv6(); address.v6.sin6_port = htons(443); EXPECT_EQ("[::1]:443", QuicSocketAddress(&address.addr, sizeof(address.v6)).ToString()); EXPECT_EQ("[::1]:443", QuicSocketAddress(address.storage).ToString()); } TEST(QuicSocketAddres, ToSockaddrIPv4) { union { sockaddr_storage storage; sockaddr_in v4; } address; address.storage = QuicSocketAddress(QuicIpAddress::Loopback4(), 443).generic_address(); ASSERT_EQ(AF_INET, address.v4.sin_family); EXPECT_EQ(QuicIpAddress::Loopback4(), QuicIpAddress(address.v4.sin_addr)); EXPECT_EQ(htons(443), address.v4.sin_port); } TEST(QuicSocketAddress, Normalize) { QuicIpAddress dual_stacked; ASSERT_TRUE(dual_stacked.FromString("::ffff:127.0.0.1")); ASSERT_TRUE(dual_stacked.IsIPv6()); QuicSocketAddress not_normalized(dual_stacked, 443); QuicSocketAddress normalized = not_normalized.Normalized(); EXPECT_EQ("[::ffff:127.0.0.1]:443", not_normalized.ToString()); EXPECT_EQ("127.0.0.1:443", normalized.ToString()); } #if defined(__linux__) && !defined(ANDROID) #include <errno.h> #include <sys/socket.h> #include <sys/types.h> TEST(QuicSocketAddress, FromSocket) { int fd; QuicSocketAddress address; bool bound = false; for (int port = 50000; port < 50400; port++) { fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP); ASSERT_GT(fd, 0); address = QuicSocketAddress(QuicIpAddress::Loopback6(), port); sockaddr_storage raw_address = address.generic_address(); int bind_result = bind(fd, reinterpret_cast<const sockaddr*>(&raw_address), sizeof(sockaddr_in6)); if (bind_result < 0 && errno == EADDRINUSE) { close(fd); continue; } ASSERT_EQ(0, bind_result); bound = true; break; } ASSERT_TRUE(bound); QuicSocketAddress real_address; ASSERT_EQ(0, real_address.FromSocket(fd)); ASSERT_TRUE(real_address.IsInitialized()); EXPECT_EQ(real_address, address); close(fd); } #endif } }
std::string QuicSocketAddress::ToString() const { switch (host_.address_family()) { case IpAddressFamily::IP_V4: return absl::StrCat(host_.ToString(), ":", port_); case IpAddressFamily::IP_V6: return absl::StrCat("[", host_.ToString(), "]:", port_); default: return ""; } }
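The takeaway from ToString() above: IPv6 hosts are bracketed so the colon before the port stays unambiguous, IPv4 hosts are not, and an unset address family renders as an empty string. A tiny sketch of just that formatting convention (an assumed helper, not the real class):

#include <cstdint>
#include <iostream>
#include <string>

// Bracket IPv6 literals so "host:port" parses unambiguously.
std::string FormatEndpoint(const std::string& host, bool is_v6, uint16_t port) {
  return is_v6 ? "[" + host + "]:" + std::to_string(port)
               : host + ":" + std::to_string(port);
}

int main() {
  std::cout << FormatEndpoint("127.0.0.1", false, 443) << "\n";  // 127.0.0.1:443
  std::cout << FormatEndpoint("::1", true, 443) << "\n";         // [::1]:443
}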
TEST(QuicSocketAddress, ExplicitConstruction) { QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443); QuicSocketAddress ipv6_address(QuicIpAddress::Loopback6(), 443); EXPECT_TRUE(ipv4_address.IsInitialized()); EXPECT_EQ("127.0.0.1:443", ipv4_address.ToString()); EXPECT_EQ("[::1]:443", ipv6_address.ToString()); EXPECT_EQ(QuicIpAddress::Loopback4(), ipv4_address.host()); EXPECT_EQ(QuicIpAddress::Loopback6(), ipv6_address.host()); EXPECT_EQ(443, ipv4_address.port()); } TEST(QuicSocketAddress, OutputToStream) { QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443); std::stringstream stream; stream << ipv4_address; EXPECT_EQ("127.0.0.1:443", stream.str()); } TEST(QuicSocketAddress, FromSockaddrIPv4) { union { sockaddr_storage storage; sockaddr addr; sockaddr_in v4; } address; memset(&address, 0, sizeof(address)); address.v4.sin_family = AF_INET; address.v4.sin_addr = QuicIpAddress::Loopback4().GetIPv4(); address.v4.sin_port = htons(443); EXPECT_EQ("127.0.0.1:443", QuicSocketAddress(&address.addr, sizeof(address.v4)).ToString()); EXPECT_EQ("127.0.0.1:443", QuicSocketAddress(address.storage).ToString()); } TEST(QuicSocketAddress, FromSockaddrIPv6) { union { sockaddr_storage storage; sockaddr addr; sockaddr_in6 v6; } address; memset(&address, 0, sizeof(address)); address.v6.sin6_family = AF_INET6; address.v6.sin6_addr = QuicIpAddress::Loopback6().GetIPv6(); address.v6.sin6_port = htons(443); EXPECT_EQ("[::1]:443", QuicSocketAddress(&address.addr, sizeof(address.v6)).ToString()); EXPECT_EQ("[::1]:443", QuicSocketAddress(address.storage).ToString()); } TEST(QuicSocketAddress, ToSockaddrIPv4) { union { sockaddr_storage storage; sockaddr_in v4; } address; address.storage = QuicSocketAddress(QuicIpAddress::Loopback4(), 443).generic_address(); ASSERT_EQ(AF_INET, address.v4.sin_family); EXPECT_EQ(QuicIpAddress::Loopback4(), QuicIpAddress(address.v4.sin_addr)); EXPECT_EQ(htons(443), address.v4.sin_port); } TEST(QuicSocketAddress, Normalize) { QuicIpAddress dual_stacked; ASSERT_TRUE(dual_stacked.FromString("::ffff:127.0.0.1")); ASSERT_TRUE(dual_stacked.IsIPv6()); QuicSocketAddress not_normalized(dual_stacked, 443); QuicSocketAddress normalized = not_normalized.Normalized(); EXPECT_EQ("[::ffff:127.0.0.1]:443", not_normalized.ToString()); EXPECT_EQ("127.0.0.1:443", normalized.ToString()); }
#include "absl/strings/internal/cordz_functions.h" #include <atomic> #include <cmath> #include <limits> #include <random> #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/profiling/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { namespace { std::atomic<int> g_cordz_mean_interval(50000); } #ifdef ABSL_INTERNAL_CORDZ_ENABLED static constexpr int64_t kInitCordzNextSample = -1; ABSL_CONST_INIT thread_local SamplingState cordz_next_sample = { kInitCordzNextSample, 1}; constexpr int64_t kIntervalIfDisabled = 1 << 16; ABSL_ATTRIBUTE_NOINLINE int64_t cordz_should_profile_slow(SamplingState& state) { thread_local absl::profiling_internal::ExponentialBiased exponential_biased_generator; int32_t mean_interval = get_cordz_mean_interval(); if (mean_interval <= 0) { state = {kIntervalIfDisabled, kIntervalIfDisabled}; return 0; } if (mean_interval == 1) { state = {1, 1}; return 1; } if (cordz_next_sample.next_sample <= 0) { const bool initialized = cordz_next_sample.next_sample != kInitCordzNextSample; auto old_stride = state.sample_stride; auto stride = exponential_biased_generator.GetStride(mean_interval); state = {stride, stride}; bool should_sample = initialized || cordz_should_profile() > 0; return should_sample ? old_stride : 0; } --state.next_sample; return 0; } void cordz_set_next_sample_for_testing(int64_t next_sample) { cordz_next_sample = {next_sample, next_sample}; } #endif int32_t get_cordz_mean_interval() { return g_cordz_mean_interval.load(std::memory_order_acquire); } void set_cordz_mean_interval(int32_t mean_interval) { g_cordz_mean_interval.store(mean_interval, std::memory_order_release); } } ABSL_NAMESPACE_END }
#include "absl/strings/internal/cordz_functions.h" #include <thread> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { namespace { using ::testing::Eq; using ::testing::Ge; using ::testing::Le; TEST(CordzFunctionsTest, SampleRate) { int32_t orig_sample_rate = get_cordz_mean_interval(); int32_t expected_sample_rate = 123; set_cordz_mean_interval(expected_sample_rate); EXPECT_THAT(get_cordz_mean_interval(), Eq(expected_sample_rate)); set_cordz_mean_interval(orig_sample_rate); } #ifdef ABSL_INTERNAL_CORDZ_ENABLED TEST(CordzFunctionsTest, ShouldProfileDisable) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(0); cordz_set_next_sample_for_testing(0); EXPECT_EQ(cordz_should_profile(), 0); EXPECT_THAT(cordz_next_sample.next_sample, Eq(1 << 16)); set_cordz_mean_interval(orig_sample_rate); } TEST(CordzFunctionsTest, ShouldProfileAlways) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(1); cordz_set_next_sample_for_testing(1); EXPECT_GT(cordz_should_profile(), 0); EXPECT_THAT(cordz_next_sample.next_sample, Le(1)); set_cordz_mean_interval(orig_sample_rate); } TEST(CordzFunctionsTest, DoesNotAlwaysSampleFirstCord) { set_cordz_mean_interval(10000); int tries = 0; bool sampled = false; do { ++tries; ASSERT_THAT(tries, Le(1000)); std::thread thread([&sampled] { sampled = cordz_should_profile() > 0; }); thread.join(); } while (sampled); } TEST(CordzFunctionsTest, ShouldProfileRate) { static constexpr int kDesiredMeanInterval = 1000; static constexpr int kSamples = 10000; int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(kDesiredMeanInterval); int64_t sum_of_intervals = 0; for (int i = 0; i < kSamples; i++) { cordz_set_next_sample_for_testing(0); cordz_should_profile(); sum_of_intervals += cordz_next_sample.next_sample; } EXPECT_THAT(sum_of_intervals, Ge(9396115)); EXPECT_THAT(sum_of_intervals, Le(10618100)); set_cordz_mean_interval(orig_sample_rate); } #else TEST(CordzFunctionsTest, ShouldProfileDisabled) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(1); cordz_set_next_sample_for_testing(0); EXPECT_FALSE(cordz_should_profile()); set_cordz_mean_interval(orig_sample_rate); } #endif } } ABSL_NAMESPACE_END }
int32_t get_cordz_mean_interval() { return g_cordz_mean_interval.load(std::memory_order_acquire); }
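The getter pairs an acquire load with the release store in set_cordz_mean_interval(), so a thread that observes a new interval also observes everything published before it. A minimal sketch of that same acquire/release pairing in isolation, for illustration only:

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>

std::atomic<int32_t> g_interval(50000);

// Release store: publishes the value (and all writes preceding it)...
void SetInterval(int32_t v) { g_interval.store(v, std::memory_order_release); }
// ...acquire load: a reader that sees the new value also sees those writes.
int32_t GetInterval() { return g_interval.load(std::memory_order_acquire); }

int main() {
  std::thread writer([] { SetInterval(123); });
  writer.join();
  std::cout << GetInterval() << "\n";  // 123
}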
TEST(CordzFunctionsTest, SampleRate) { int32_t orig_sample_rate = get_cordz_mean_interval(); int32_t expected_sample_rate = 123; set_cordz_mean_interval(expected_sample_rate); EXPECT_THAT(get_cordz_mean_interval(), Eq(expected_sample_rate)); set_cordz_mean_interval(orig_sample_rate); } #ifdef ABSL_INTERNAL_CORDZ_ENABLED TEST(CordzFunctionsTest, ShouldProfileDisable) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(0); cordz_set_next_sample_for_testing(0); EXPECT_EQ(cordz_should_profile(), 0); EXPECT_THAT(cordz_next_sample.next_sample, Eq(1 << 16)); set_cordz_mean_interval(orig_sample_rate); } TEST(CordzFunctionsTest, ShouldProfileAlways) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(1); cordz_set_next_sample_for_testing(1); EXPECT_GT(cordz_should_profile(), 0); EXPECT_THAT(cordz_next_sample.next_sample, Le(1)); set_cordz_mean_interval(orig_sample_rate); } TEST(CordzFunctionsTest, ShouldProfileRate) { static constexpr int kDesiredMeanInterval = 1000; static constexpr int kSamples = 10000; int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(kDesiredMeanInterval); int64_t sum_of_intervals = 0; for (int i = 0; i < kSamples; i++) { cordz_set_next_sample_for_testing(0); cordz_should_profile(); sum_of_intervals += cordz_next_sample.next_sample; } EXPECT_THAT(sum_of_intervals, Ge(9396115)); EXPECT_THAT(sum_of_intervals, Le(10618100)); set_cordz_mean_interval(orig_sample_rate); } #else TEST(CordzFunctionsTest, ShouldProfileDisabled) { int32_t orig_sample_rate = get_cordz_mean_interval(); set_cordz_mean_interval(1); cordz_set_next_sample_for_testing(0); EXPECT_FALSE(cordz_should_profile()); set_cordz_mean_interval(orig_sample_rate); }
#include "tsl/platform/file_system.h" #include <sys/stat.h> #include <algorithm> #include <deque> #include <string> #include <utility> #include <vector> #include "tsl/platform/status.h" #if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \ defined(PLATFORM_GOOGLE) #include <fnmatch.h> #else #include "tsl/platform/regexp.h" #endif #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/platform.h" #include "tsl/platform/scanner.h" #include "tsl/platform/str_util.h" #include "tsl/platform/strcat.h" namespace tsl { bool FileSystem::Match(const string& filename, const string& pattern) { #if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \ defined(PLATFORM_GOOGLE) return fnmatch(pattern.c_str(), filename.c_str(), FNM_PATHNAME) == 0; #else string regexp(pattern); regexp = str_util::StringReplace(regexp, "*", "[^/]*", true); regexp = str_util::StringReplace(regexp, "?", ".", true); regexp = str_util::StringReplace(regexp, "(", "\\(", true); regexp = str_util::StringReplace(regexp, ")", "\\)", true); return RE2::FullMatch(filename, regexp); #endif } string FileSystem::TranslateName(const string& name) const { if (name.empty()) return name; StringPiece scheme, host, path; this->ParseURI(name, &scheme, &host, &path); if (path.empty()) return "/"; return this->CleanPath(path); } absl::Status FileSystem::IsDirectory(const string& name, TransactionToken* token) { TF_RETURN_IF_ERROR(FileExists(name)); FileStatistics stat; TF_RETURN_IF_ERROR(Stat(name, &stat)); if (stat.is_directory) { return absl::OkStatus(); } return absl::Status(absl::StatusCode::kFailedPrecondition, "Not a directory"); } absl::Status FileSystem::HasAtomicMove(const string& path, bool* has_atomic_move) { *has_atomic_move = true; return absl::OkStatus(); } absl::Status FileSystem::CanCreateTempFile(const std::string& fname, bool* can_create_temp_file) { *can_create_temp_file = true; return absl::OkStatus(); } void FileSystem::FlushCaches(TransactionToken* token) {} bool FileSystem::FilesExist(const std::vector<string>& files, TransactionToken* token, std::vector<absl::Status>* status) { bool result = true; for (const auto& file : files) { absl::Status s = FileExists(file); result &= s.ok(); if (status != nullptr) { status->push_back(s); } else if (!result) { return false; } } return result; } absl::Status FileSystem::DeleteRecursively(const string& dirname, TransactionToken* token, int64_t* undeleted_files, int64_t* undeleted_dirs) { CHECK_NOTNULL(undeleted_files); CHECK_NOTNULL(undeleted_dirs); *undeleted_files = 0; *undeleted_dirs = 0; absl::Status exists_status = FileExists(dirname); if (!exists_status.ok()) { (*undeleted_dirs)++; return exists_status; } if (!IsDirectory(dirname).ok()) { absl::Status delete_root_status = DeleteFile(dirname); if (!delete_root_status.ok()) (*undeleted_files)++; return delete_root_status; } std::deque<string> dir_q; std::vector<string> dir_list; dir_q.push_back(dirname); absl::Status ret; while (!dir_q.empty()) { string dir = dir_q.front(); dir_q.pop_front(); dir_list.push_back(dir); std::vector<string> children; absl::Status s = GetChildren(dir, &children); ret.Update(s); if (!s.ok()) { (*undeleted_dirs)++; continue; } for (const string& child : children) { const string child_path = this->JoinPath(dir, child); if (IsDirectory(child_path).ok()) { dir_q.push_back(child_path); } else { absl::Status del_status = DeleteFile(child_path); ret.Update(del_status); if (!del_status.ok()) { (*undeleted_files)++; } } } } std::reverse(dir_list.begin(), 
dir_list.end()); for (const string& dir : dir_list) { absl::Status s = DeleteDir(dir); ret.Update(s); if (!s.ok()) { (*undeleted_dirs)++; } } return ret; } absl::Status FileSystem::RecursivelyCreateDir(const string& dirname, TransactionToken* token) { StringPiece scheme, host, remaining_dir; this->ParseURI(dirname, &scheme, &host, &remaining_dir); std::vector<StringPiece> sub_dirs; while (!remaining_dir.empty()) { std::string current_entry = this->CreateURI(scheme, host, remaining_dir); absl::Status exists_status = FileExists(current_entry); if (exists_status.ok()) { absl::Status directory_status = IsDirectory(current_entry); if (directory_status.ok()) { break; } else if (directory_status.code() == absl::StatusCode::kUnimplemented) { return directory_status; } else { return errors::FailedPrecondition(remaining_dir, " is not a directory"); } } if (exists_status.code() != error::Code::NOT_FOUND) { return exists_status; } if (!str_util::EndsWith(remaining_dir, "/")) { sub_dirs.push_back(this->Basename(remaining_dir)); } remaining_dir = this->Dirname(remaining_dir); } std::reverse(sub_dirs.begin(), sub_dirs.end()); string built_path(remaining_dir); for (const StringPiece sub_dir : sub_dirs) { built_path = this->JoinPath(built_path, sub_dir); absl::Status status = CreateDir(this->CreateURI(scheme, host, built_path)); if (!status.ok() && status.code() != absl::StatusCode::kAlreadyExists) { return status; } } return absl::OkStatus(); } absl::Status FileSystem::CopyFile(const string& src, const string& target, TransactionToken* token) { return FileSystemCopyFile(this, src, this, target); } char FileSystem::Separator() const { return '/'; } string FileSystem::JoinPathImpl(std::initializer_list<StringPiece> paths) { string result; for (StringPiece path : paths) { if (path.empty()) continue; if (result.empty()) { result = string(path); continue; } if (result[result.size() - 1] == '/') { if (this->IsAbsolutePath(path)) { strings::StrAppend(&result, path.substr(1)); } else { strings::StrAppend(&result, path); } } else { if (this->IsAbsolutePath(path)) { strings::StrAppend(&result, path); } else { strings::StrAppend(&result, "/", path); } } } return result; } std::pair<StringPiece, StringPiece> FileSystem::SplitPath( StringPiece uri) const { StringPiece scheme, host, path; ParseURI(uri, &scheme, &host, &path); if (path.empty()) { return std::make_pair(StringPiece(), StringPiece()); } size_t pos = path.rfind(this->Separator()); #ifdef PLATFORM_WINDOWS size_t pos2 = path.rfind('/'); if (pos == string::npos) { pos = pos2; } else { if (pos2 != string::npos) { pos = pos > pos2 ? 
pos : pos2; } } #endif if (pos == StringPiece::npos) { if (host.empty()) { return std::make_pair(StringPiece(), path); } return std::make_pair(StringPiece(uri.data(), host.end() - uri.begin()), path); } if (pos == 0) { return std::make_pair( StringPiece(uri.data(), path.begin() + 1 - uri.begin()), StringPiece(path.data() + 1, path.size() - 1)); } return std::make_pair( StringPiece(uri.data(), path.begin() + pos - uri.begin()), StringPiece(path.data() + pos + 1, path.size() - (pos + 1))); } bool FileSystem::IsAbsolutePath(StringPiece path) const { return !path.empty() && path[0] == '/'; } StringPiece FileSystem::Dirname(StringPiece path) const { return this->SplitPath(path).first; } StringPiece FileSystem::Basename(StringPiece path) const { return this->SplitPath(path).second; } StringPiece FileSystem::Extension(StringPiece path) const { StringPiece basename = this->Basename(path); size_t pos = basename.rfind('.'); if (pos == StringPiece::npos) { return StringPiece(path.data() + path.size(), 0); } else { return StringPiece(path.data() + pos + 1, path.size() - (pos + 1)); } } string FileSystem::CleanPath(StringPiece unclean_path) const { string path(unclean_path); const char* src = path.c_str(); string::iterator dst = path.begin(); const bool is_absolute_path = *src == '/'; if (is_absolute_path) { *dst++ = *src++; while (*src == '/') ++src; } string::const_iterator backtrack_limit = dst; while (*src) { bool parsed = false; if (src[0] == '.') { if (src[1] == '/' || !src[1]) { if (*++src) { ++src; } parsed = true; } else if (src[1] == '.' && (src[2] == '/' || !src[2])) { src += 2; if (dst != backtrack_limit) { for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) { } } else if (!is_absolute_path) { src -= 2; *dst++ = *src++; *dst++ = *src++; if (*src) { *dst++ = *src; } backtrack_limit = dst; } if (*src) { ++src; } parsed = true; } } if (!parsed) { while (*src && *src != '/') { *dst++ = *src++; } if (*src) { *dst++ = *src++; } } while (*src == '/') { ++src; } } string::difference_type path_length = dst - path.begin(); if (path_length != 0) { if (path_length > 1 && path[path_length - 1] == '/') { --path_length; } path.resize(path_length); } else { path.assign(1, '.'); } return path; } void FileSystem::ParseURI(StringPiece remaining, StringPiece* scheme, StringPiece* host, StringPiece* path) const { if (!strings::Scanner(remaining) .One(strings::Scanner::LETTER) .Many(strings::Scanner::LETTER_DIGIT_DOT) .StopCapture() .OneLiteral("://") .GetResult(&remaining, scheme)) { *scheme = StringPiece(); *host = StringPiece(); *path = remaining; return; } if (!strings::Scanner(remaining).ScanUntil('/').GetResult(&remaining, host)) { *host = remaining; *path = StringPiece(); return; } *path = remaining; } string FileSystem::CreateURI(StringPiece scheme, StringPiece host, StringPiece path) const { if (scheme.empty()) { return string(path); } return strings::StrCat(scheme, "://", host, path); } std::string FileSystem::DecodeTransaction(const TransactionToken* token) { if (token) { std::stringstream oss; oss << "Token= " << token->token << ", Owner=" << token->owner; return oss.str(); } return "No Transaction"; } }
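ParseURI above splits "scheme://host/path" and deliberately leaves the leading '/' on the path; inputs without a scheme fall through with the entire string treated as the path. A hand-rolled sketch of those semantics using only std::string (simplified: the real scanner also constrains the scheme's character set):

#include <iostream>
#include <string>

// Simplified scheme://host/path split mirroring ParseURI's observable
// behavior; not the TensorFlow implementation.
void SplitURI(const std::string& uri, std::string* scheme, std::string* host,
              std::string* path) {
  const auto sep = uri.find("://");
  if (sep == std::string::npos) {  // no scheme: whole input is the path
    scheme->clear();
    host->clear();
    *path = uri;
    return;
  }
  *scheme = uri.substr(0, sep);
  const auto slash = uri.find('/', sep + 3);
  if (slash == std::string::npos) {  // no path: the rest is the host
    *host = uri.substr(sep + 3);
    path->clear();
    return;
  }
  *host = uri.substr(sep + 3, slash - (sep + 3));
  *path = uri.substr(slash);  // keeps the leading '/'
}

int main() {
  std::string scheme, host, path;
  SplitURI("ipfs://solarsystem/Earth/Moon", &scheme, &host, &path);
  std::cout << scheme << " | " << host << " | " << path << "\n";
  // ipfs | solarsystem | /Earth/Moon
}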
#include "tensorflow/core/platform/file_system.h" #include <sys/stat.h> #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/null_file_system.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/str_util.h" #include "tensorflow/core/platform/strcat.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { static const char* const kPrefix = "ipfs: class InterPlanetaryFileSystem : public NullFileSystem { public: TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; Status FileExists(const string& fname, TransactionToken* token) override { string parsed_path; ParsePath(fname, &parsed_path); if (BodyExists(parsed_path)) { return absl::OkStatus(); } return Status(absl::StatusCode::kNotFound, "File does not exist"); } Status CreateDir(const string& dirname, TransactionToken* token) override { string parsed_path; ParsePath(dirname, &parsed_path); if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) { return Status(absl::StatusCode::kAlreadyExists, "dirname already exists."); } std::vector<string> split_path = str_util::Split(parsed_path, '/'); if (split_path.size() > 3) { return Status(absl::StatusCode::kInvalidArgument, "Bad dirname"); } if (split_path.empty()) { return absl::OkStatus(); } if (split_path.size() == 1) { celestial_bodies_[""].insert(parsed_path); celestial_bodies_.insert( std::pair<string, std::set<string>>(parsed_path, {})); return absl::OkStatus(); } if (split_path.size() == 2) { if (!BodyExists(split_path[0])) { return Status(absl::StatusCode::kFailedPrecondition, "Base dir not created"); } celestial_bodies_[split_path[0]].insert(split_path[1]); celestial_bodies_.insert( std::pair<string, std::set<string>>(parsed_path, {})); return absl::OkStatus(); } if (split_path.size() == 3) { const string& parent_path = this->JoinPath(split_path[0], split_path[1]); if (!BodyExists(parent_path)) { return Status(absl::StatusCode::kFailedPrecondition, "Base dir not created"); } celestial_bodies_[parent_path].insert(split_path[2]); celestial_bodies_.insert( std::pair<string, std::set<string>>(parsed_path, {})); return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Failed to create"); } Status IsDirectory(const string& dirname, TransactionToken* token) override { string parsed_path; ParsePath(dirname, &parsed_path); if (parsed_path == "evil_directory") { LOG(FATAL) << "evil_directory cannot be accessed"; } std::vector<string> split_path = str_util::Split(parsed_path, '/'); if (split_path.size() > 2) { return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) { return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } Status GetChildren(const string& dir, TransactionToken* token, std::vector<string>* result) override { TF_RETURN_IF_ERROR(IsDirectory(dir, nullptr)); string parsed_path; ParsePath(dir, &parsed_path); result->insert(result->begin(), celestial_bodies_[parsed_path].begin(), celestial_bodies_[parsed_path].end()); return absl::OkStatus(); } private: bool BodyExists(const string& name) { return celestial_bodies_.find(name) != celestial_bodies_.end(); } void ParsePath(const string& name, string* parsed_path) { StringPiece scheme, host, path; this->ParseURI(name, &scheme, &host, &path); ASSERT_EQ(scheme, "ipfs"); ASSERT_EQ(host, "solarsystem"); absl::ConsumePrefix(&path, "/"); *parsed_path = string(path); } std::map<string, std::set<string>> 
celestial_bodies_ = { std::pair<string, std::set<string>>( "", {"Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"}), std::pair<string, std::set<string>>("Mercury", {}), std::pair<string, std::set<string>>("Venus", {}), std::pair<string, std::set<string>>("Earth", {"Moon"}), std::pair<string, std::set<string>>("Mars", {}), std::pair<string, std::set<string>>("Jupiter", {"Europa", "Io", "Ganymede"}), std::pair<string, std::set<string>>("Saturn", {}), std::pair<string, std::set<string>>("Uranus", {}), std::pair<string, std::set<string>>("Neptune", {}), std::pair<string, std::set<string>>("Earth/Moon", {}), std::pair<string, std::set<string>>("Jupiter/Europa", {}), std::pair<string, std::set<string>>("Jupiter/Io", {}), std::pair<string, std::set<string>>("Jupiter/Ganymede", {})}; }; string Match(InterPlanetaryFileSystem* ipfs, const string& suffix_pattern) { std::vector<string> results; Status s = ipfs->GetMatchingPaths(ipfs->JoinPath(kPrefix, suffix_pattern), nullptr, &results); if (!s.ok()) { return s.ToString(); } else { std::vector<StringPiece> trimmed_results; std::sort(results.begin(), results.end()); for (const string& result : results) { StringPiece trimmed_result(result); EXPECT_TRUE( absl::ConsumePrefix(&trimmed_result, strings::StrCat(kPrefix, "/"))); trimmed_results.push_back(trimmed_result); } return absl::StrJoin(trimmed_results, ","); } } TEST(InterPlanetaryFileSystemTest, IPFSMatch) { InterPlanetaryFileSystem ipfs; EXPECT_EQ(Match(&ipfs, "thereisnosuchfile"), ""); EXPECT_EQ(Match(&ipfs, "*"), "Earth,Jupiter,Mars,Mercury,Neptune,Saturn,Uranus,Venus"); EXPECT_EQ(Match(&ipfs, "Jupiter*"), "Earth/Moon,Jupiter/Europa,Jupiter/Ganymede,Jupiter/Io"); TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet0"), nullptr)); TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet1"), nullptr)); EXPECT_EQ(Match(&ipfs, "Planet[0-1]"), "Planet0,Planet1"); EXPECT_EQ(Match(&ipfs, "Planet?"), "Planet0,Planet1"); } TEST(InterPlanetaryFileSystemTest, MatchSimple) { InterPlanetaryFileSystem ipfs; TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-00"), nullptr)); TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-0a"), nullptr)); TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-01"), nullptr)); TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-aaa"), nullptr)); EXPECT_EQ(Match(&ipfs, "match-*"), "match-00,match-01,match-0a,match-aaa"); EXPECT_EQ(Match(&ipfs, "match-0[0-9]"), "match-00,match-01"); EXPECT_EQ(Match(&ipfs, "match-?[0-9]"), "match-00,match-01"); EXPECT_EQ(Match(&ipfs, "match-?a*"), "match-0a,match-aaa"); EXPECT_EQ(Match(&ipfs, "match-??"), "match-00,match-01,match-0a"); } TEST(InterPlanetaryFileSystemTest, MatchOnlyNeeded) { InterPlanetaryFileSystem ipfs; TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "abcd"), nullptr)); TF_EXPECT_OK( ipfs.CreateDir(ipfs.JoinPath(kPrefix, "evil_directory"), nullptr)); EXPECT_EQ(Match(&ipfs, "abcd"), "abcd"); } TEST(InterPlanetaryFileSystemTest, MatchDirectory) { InterPlanetaryFileSystem ipfs; TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-00/abc/x"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-0a/abc/x"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-01/abc/x"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-aaa/abc/x"), nullptr)); EXPECT_EQ(Match(&ipfs, "match-*/abc/x"), "match-00/abc/x,match-01/abc/x,match-0a/abc/x,match-aaa/abc/x"); EXPECT_EQ(Match(&ipfs, 
"match-0[0-9]/abc/x"), "match-00/abc/x,match-01/abc/x"); EXPECT_EQ(Match(&ipfs, "match-?[0-9]/abc/x"), "match-00/abc/x,match-01/abc/x"); EXPECT_EQ(Match(&ipfs, "match-?a*/abc/x"), "match-0a/abc/x,match-aaa/abc/x"); EXPECT_EQ(Match(&ipfs, "match-?[^a]/abc/x"), "match-00/abc/x,match-01/abc/x"); } TEST(InterPlanetaryFileSystemTest, MatchMultipleWildcards) { InterPlanetaryFileSystem ipfs; TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-00/abc/00"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-00/abc/01"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-00/abc/09"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-01/abc/00"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-01/abc/04"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-01/abc/10"), nullptr)); TF_EXPECT_OK(ipfs.RecursivelyCreateDir( ipfs.JoinPath(kPrefix, "match-02/abc/00"), nullptr)); EXPECT_EQ(Match(&ipfs, "match-0[0-1]/abc/0[0-8]"), "match-00/abc/00,match-00/abc/01,match-01/abc/00,match-01/abc/04"); } TEST(InterPlanetaryFileSystemTest, RecursivelyCreateAlreadyExistingDir) { InterPlanetaryFileSystem ipfs; const string dirname = ipfs.JoinPath(kPrefix, "match-00/abc/00"); TF_EXPECT_OK(ipfs.RecursivelyCreateDir(dirname)); } TEST(InterPlanetaryFileSystemTest, HasAtomicMove) { InterPlanetaryFileSystem ipfs; const string dirname = io::JoinPath(kPrefix, "match-00/abc/00"); bool has_atomic_move; TF_EXPECT_OK(ipfs.HasAtomicMove(dirname, &has_atomic_move)); EXPECT_EQ(has_atomic_move, true); } TEST(InterPlanetaryFileSystemTest, CanCreateTempFile) { InterPlanetaryFileSystem ipfs; const string dirname = io::JoinPath(kPrefix, "match-00/abc/00"); bool can_create_temp_file; TF_EXPECT_OK(ipfs.CanCreateTempFile(dirname, &can_create_temp_file)); EXPECT_EQ(can_create_temp_file, true); } class TestFileSystem : public NullFileSystem { public: Status IsDirectory(const string& dirname, TransactionToken* token) override { if (dirname == "." || dirname.empty()) { return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } Status GetChildren(const string& dir, TransactionToken* token, std::vector<string>* result) override { if (dir == "." || dir.empty()) { result->push_back("test"); } return absl::OkStatus(); } }; TEST(TestFileSystemTest, RootDirectory) { TestFileSystem fs; std::vector<string> results; auto ret = fs.GetMatchingPaths("./te*", nullptr, &results); EXPECT_EQ(1, results.size()); EXPECT_EQ("./test", results[0]); ret = fs.GetMatchingPaths("te*", nullptr, &results); EXPECT_EQ(1, results.size()); EXPECT_EQ("./test", results[0]); } }
absl::Status FileSystem::CanCreateTempFile(const std::string& fname, bool* can_create_temp_file) { *can_create_temp_file = true; return absl::OkStatus(); }
TEST(InterPlanetaryFileSystemTest, CanCreateTempFile) { InterPlanetaryFileSystem ipfs; const string dirname = io::JoinPath(kPrefix, "match-00/abc/00"); bool can_create_temp_file; TF_EXPECT_OK(ipfs.CanCreateTempFile(dirname, &can_create_temp_file)); EXPECT_EQ(can_create_temp_file, true); }
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h" #include <algorithm> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/str_cat.h" #include "absl/time/time.h" #include "flatbuffers/flatbuffer_builder.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h" #include "tensorflow/lite/logger.h" #include "tensorflow/lite/minimal_logging.h" namespace tflite { namespace acceleration { namespace { using ::flatbuffers::FlatBufferBuilder; using ::flatbuffers::GetRoot; constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20); std::string GenerateRandomString() { static const char charset[] = "0123456789" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; const int size = 10; std::string result; result.resize(size); for (int i = 0; i < size; ++i) { result[i] = charset[rand() % (sizeof(charset) - 1)]; } return result; } } BlockingValidatorRunner::BlockingValidatorRunner( const ValidatorRunnerOptions& options) : per_test_timeout_ms_(options.per_test_timeout_ms), storage_path_base_(options.storage_path) { validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>( CreateModelLoaderPath(options), options.storage_path, options.data_directory_path, options.per_test_timeout_ms, options.custom_input_data.empty() ? nullptr : std::make_unique<CustomValidationEmbedder>( options.custom_input_batch_size, options.custom_input_data, options.error_reporter), options.error_reporter, options.nnapi_sl, options.gpu_plugin_handle, options.validation_entrypoint_name, options.benchmark_result_evaluator); } MinibenchmarkStatus BlockingValidatorRunner::Init() { return validator_runner_impl_->Init(); } std::vector<FlatBufferBuilder> BlockingValidatorRunner::TriggerValidation( const std::vector<const TFLiteSettings*>& for_settings) { if (for_settings.empty()) { return {}; } std::string storage_path = absl::StrCat(storage_path_base_, ".", GenerateRandomString()); TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Validation storage path: %s", storage_path.c_str()); std::vector<flatbuffers::FlatBufferBuilder> to_be_run; std::vector<TFLiteSettingsT> for_settings_obj; for_settings_obj.reserve(for_settings.size()); for (auto settings : for_settings) { TFLiteSettingsT tflite_settings; settings->UnPackTo(&tflite_settings); flatbuffers::FlatBufferBuilder copy; copy.Finish(CreateTFLiteSettings(copy, &tflite_settings)); to_be_run.emplace_back(std::move(copy)); for_settings_obj.emplace_back(tflite_settings); } validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run), storage_path); int64_t total_timeout_ms = per_test_timeout_ms_ * (1 + for_settings.size()); int64_t deadline_us = Validator::BootTimeMicros() + total_timeout_ms * 1000; bool within_timeout = true; while ((validator_runner_impl_->GetNumCompletedResults()) < for_settings.size() && (within_timeout = Validator::BootTimeMicros() < deadline_us)) { usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh)); } std::vector<FlatBufferBuilder> results = validator_runner_impl_->GetCompletedResults(); if (!within_timeout) { TFLITE_LOG_PROD( TFLITE_LOG_WARNING, "Validation timed out after %ld ms. 
Returning before all tests finished.", total_timeout_ms); } else if (for_settings.size() != results.size()) { TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Validation completed. Started benchmarking for %zu " "TFLiteSettings, received %zu results.", for_settings.size(), results.size()); } std::vector<TFLiteSettingsT> result_settings; result_settings.reserve(results.size()); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); TFLiteSettingsT event_settings; event->tflite_settings()->UnPackTo(&event_settings); result_settings.emplace_back(std::move(event_settings)); } for (auto& settings_obj : for_settings_obj) { auto result_it = std::find(result_settings.begin(), result_settings.end(), settings_obj); if (result_it == result_settings.end()) { FlatBufferBuilder fbb; fbb.Finish(CreateBenchmarkEvent( fbb, CreateTFLiteSettings(fbb, &settings_obj), BenchmarkEventType_ERROR, 0, CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, 0, 0, 0, kMinibenchmarkCompletionEventMissing), Validator::BootTimeMicros(), Validator::WallTimeMicros())); results.emplace_back(std::move(fbb)); } } (void)unlink(storage_path.c_str()); return results; } } }
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h" #include <fcntl.h> #include <iostream> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_cat.h" #include "flatbuffers/buffer.h" #include "flatbuffers/flatbuffer_builder.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h" namespace tflite { namespace acceleration { namespace { using ::flatbuffers::FlatBufferBuilder; using ::flatbuffers::GetRoot; class CustomResultEvaluator : public AbstractBenchmarkResultEvaluator { public: bool HasPassedAccuracyCheck(const BenchmarkResult& result) override { return true; } }; class BlockingValidatorRunnerTest : public ::testing::Test { protected: void SetUp() override { MiniBenchmarkTestHelper helper; should_perform_test_ = helper.should_perform_test(); options_.model_path = helper.DumpToTempFile( "mobilenet_quant_with_validation.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model, g_tflite_acceleration_embedded_mobilenet_validation_model_len); ASSERT_TRUE(!options_.model_path.empty()); options_.data_directory_path = ::testing::TempDir(); options_.storage_path = absl::StrCat(::testing::TempDir(), "storage_path.fb.1"); options_.per_test_timeout_ms = 5000; plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile( "mobilenet_quant.tflite", g_tflite_acceleration_embedded_mobilenet_model, g_tflite_acceleration_embedded_mobilenet_model_len); } std::string plain_model_path_; ValidatorRunnerOptions options_; bool should_perform_test_ = true; }; TEST_F(BlockingValidatorRunnerTest, SucceedWithEmbeddedValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU)); #else fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithFdCloexecEmbeddedValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_fd = open(options_.model_path.c_str(), O_RDONLY | O_CLOEXEC); ASSERT_GE(options_.model_fd, 0); struct stat stat_buf = {0}; ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0); options_.model_size = stat_buf.st_size; options_.model_offset = 0; options_.model_path.clear(); BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, 
Delegate_GPU)); #else fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithBufferModel) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_validation_model; options_.model_size = g_tflite_acceleration_embedded_mobilenet_validation_model_len; options_.model_path.clear(); BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithFdModelCustomValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_path.clear(); options_.model_fd = open(plain_model_path_.c_str(), O_RDONLY); ASSERT_GE(options_.model_fd, 0); struct stat stat_buf = {0}; ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0); options_.model_size = stat_buf.st_size; options_.model_offset = 0; options_.custom_input_batch_size = 3; options_.custom_input_data = {std::vector<uint8_t>(3 * 224 * 224 * 3, 1)}; CustomResultEvaluator evaluator; options_.benchmark_result_evaluator = &evaluator; BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, Delegate_XNNPACK)); #else fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); } } TEST_F(BlockingValidatorRunnerTest, SucceedWhenRunningMultipleTimes) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); int num_runs = 3; for (int i = 0; i < num_runs; i++) { std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer()), flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } } TEST_F(BlockingValidatorRunnerTest, ReturnErrorWhenTimedOut) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.per_test_timeout_ms = 50; 
BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::SizeIs(1)); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR); ASSERT_NE(nullptr, event->error()); EXPECT_THAT(event->error()->mini_benchmark_error_code(), testing::AnyOf(kMinibenchmarkCommandTimedOut, kMinibenchmarkCompletionEventMissing)); } } } } }
std::vector<FlatBufferBuilder> BlockingValidatorRunner::TriggerValidation( const std::vector<const TFLiteSettings*>& for_settings) { if (for_settings.empty()) { return {}; } std::string storage_path = absl::StrCat(storage_path_base_, ".", GenerateRandomString()); TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Validation storage path: %s", storage_path.c_str()); std::vector<flatbuffers::FlatBufferBuilder> to_be_run; std::vector<TFLiteSettingsT> for_settings_obj; for_settings_obj.reserve(for_settings.size()); for (auto settings : for_settings) { TFLiteSettingsT tflite_settings; settings->UnPackTo(&tflite_settings); flatbuffers::FlatBufferBuilder copy; copy.Finish(CreateTFLiteSettings(copy, &tflite_settings)); to_be_run.emplace_back(std::move(copy)); for_settings_obj.emplace_back(tflite_settings); } validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run), storage_path); int64_t total_timeout_ms = per_test_timeout_ms_ * (1 + for_settings.size()); int64_t deadline_us = Validator::BootTimeMicros() + total_timeout_ms * 1000; bool within_timeout = true; while ((validator_runner_impl_->GetNumCompletedResults()) < for_settings.size() && (within_timeout = Validator::BootTimeMicros() < deadline_us)) { usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh)); } std::vector<FlatBufferBuilder> results = validator_runner_impl_->GetCompletedResults(); if (!within_timeout) { TFLITE_LOG_PROD( TFLITE_LOG_WARNING, "Validation timed out after %ld ms. Returning before all tests finished.", total_timeout_ms); } else if (for_settings.size() != results.size()) { TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Validation completed. Started benchmarking for %zu " "TFLiteSettings, received %zu results.", for_settings.size(), results.size()); } std::vector<TFLiteSettingsT> result_settings; result_settings.reserve(results.size()); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); TFLiteSettingsT event_settings; event->tflite_settings()->UnPackTo(&event_settings); result_settings.emplace_back(std::move(event_settings)); } for (auto& settings_obj : for_settings_obj) { auto result_it = std::find(result_settings.begin(), result_settings.end(), settings_obj); if (result_it == result_settings.end()) { FlatBufferBuilder fbb; fbb.Finish(CreateBenchmarkEvent( fbb, CreateTFLiteSettings(fbb, &settings_obj), BenchmarkEventType_ERROR, 0, CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, 0, 0, 0, kMinibenchmarkCompletionEventMissing), Validator::BootTimeMicros(), Validator::WallTimeMicros())); results.emplace_back(std::move(fbb)); } } (void)unlink(storage_path.c_str()); return results; }
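TriggerValidation budgets one shared deadline for the whole batch, per_test_timeout_ms * (1 + N), then polls completion every 20 ms until all results arrive or the budget runs out; missing results are backfilled as error events. A self-contained sketch of that budget-and-poll pattern (hypothetical names, std::chrono standing in for boot time):

#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <thread>

// Wait until completed_count() reaches num_tests or the shared budget of
// (1 + num_tests) * per_test_timeout_ms elapses; returns false on timeout.
bool WaitForCompletion(int num_tests, int64_t per_test_timeout_ms,
                       const std::function<int()>& completed_count) {
  using Clock = std::chrono::steady_clock;
  const auto deadline = Clock::now() + std::chrono::milliseconds(
                                           (1 + num_tests) * per_test_timeout_ms);
  while (completed_count() < num_tests) {
    if (Clock::now() >= deadline) return false;
    std::this_thread::sleep_for(std::chrono::milliseconds(20));
  }
  return true;
}

int main() {
  int done = 0;
  // Each poll "completes" one more test, so this finishes well within budget.
  std::cout << WaitForCompletion(3, 100, [&] { return ++done; }) << "\n";  // 1
}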
TEST_F(BlockingValidatorRunnerTest, SucceedWithEmbeddedValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU)); #else fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithFdCloexecEmbeddedValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_fd = open(options_.model_path.c_str(), O_RDONLY | O_CLOEXEC); ASSERT_GE(options_.model_fd, 0); struct stat stat_buf = {0}; ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0); options_.model_size = stat_buf.st_size; options_.model_offset = 0; options_.model_path.clear(); BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU)); #else fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithBufferModel) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_validation_model; options_.model_size = g_tflite_acceleration_embedded_mobilenet_validation_model_len; options_.model_path.clear(); BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } TEST_F(BlockingValidatorRunnerTest, SucceedWithFdModelCustomValidation) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.model_path.clear(); options_.model_fd = open(plain_model_path_.c_str(), O_RDONLY); ASSERT_GE(options_.model_fd, 0); struct stat stat_buf = {0}; ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0); options_.model_size = stat_buf.st_size; options_.model_offset = 0; options_.custom_input_batch_size = 3; options_.custom_input_data = {std::vector<uint8_t>(3 * 224 * 224 * 3, 1)}; CustomResultEvaluator evaluator; options_.benchmark_result_evaluator = &evaluator; BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; #ifdef __ANDROID__ fbb.Finish(CreateTFLiteSettings(fbb, Delegate_XNNPACK)); #else 
fbb.Finish(CreateTFLiteSettings(fbb)); #endif std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); } } TEST_F(BlockingValidatorRunnerTest, SucceedWhenRunningMultipleTimes) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); int num_runs = 3; for (int i = 0; i < num_runs; i++) { std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer()), flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::Not(testing::IsEmpty())); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_END); EXPECT_TRUE(event->result()->ok()); } } } TEST_F(BlockingValidatorRunnerTest, ReturnErrorWhenTimedOut) { if (!should_perform_test_) { std::cerr << "Skipping test"; return; } options_.per_test_timeout_ms = 50; BlockingValidatorRunner runner(options_); ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess); FlatBufferBuilder fbb; fbb.Finish(CreateTFLiteSettings(fbb)); std::vector<FlatBufferBuilder> results = runner.TriggerValidation( {flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())}); EXPECT_THAT(results, testing::SizeIs(1)); for (auto& result : results) { const BenchmarkEvent* event = GetRoot<BenchmarkEvent>(result.GetBufferPointer()); EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR); ASSERT_NE(nullptr, event->error()); EXPECT_THAT(event->error()->mini_benchmark_error_code(), testing::AnyOf(kMinibenchmarkCommandTimedOut, kMinibenchmarkCompletionEventMissing)); } }
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h" #ifndef _WIN32 #include <fcntl.h> #include <sys/file.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #endif #include <string> namespace tflite { namespace acceleration { bool FileLock::TryLock() { #ifndef _WIN32 if (fd_ < 0) { fd_ = open(path_.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600); } if (fd_ < 0) { return false; } if (flock(fd_, LOCK_EX | LOCK_NB) == 0) { return true; } #endif return false; } } }
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h" #include <csignal> #include <iostream> #include <string> #include <utility> #include <gtest/gtest.h> namespace tflite { namespace acceleration { namespace { class FileLockTest : public ::testing::Test { protected: void SetUp() override { file_path_ = ::testing::TempDir() + "/file_lock"; } std::string file_path_; }; TEST_F(FileLockTest, CanLock) { EXPECT_TRUE(FileLock(file_path_).TryLock()); } TEST_F(FileLockTest, FailIfLockMoreThanOnce) { FileLock lock_one(file_path_); FileLock lock_two(file_path_); ASSERT_TRUE(lock_one.TryLock()); EXPECT_FALSE(lock_two.TryLock()); } TEST_F(FileLockTest, LockReleasedWhenThreadCrash) { pid_t pid = fork(); if (pid == 0) { FileLock lock(file_path_); if (!lock.TryLock()) { _exit(1); } std::cout << "Lock acquired successfully."; kill(getpid(), SIGKILL); } int wstatus; int w = waitpid(pid, &wstatus, WUNTRACED); ASSERT_NE(w, -1); FileLock lock_two(file_path_); EXPECT_TRUE(lock_two.TryLock()); } } } }
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h" #ifndef _WIN32 #include <fcntl.h> #include <sys/file.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #endif #include <string> namespace tflite { namespace acceleration { bool FileLock::TryLock() { #ifndef _WIN32 if (fd_ < 0) { fd_ = open(path_.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600); } if (fd_ < 0) { return false; } if (flock(fd_, LOCK_EX | LOCK_NB) == 0) { return true; } #endif return false; }
TEST_F(FileLockTest, CanLock) { EXPECT_TRUE(FileLock(file_path_).TryLock()); } TEST_F(FileLockTest, FailIfLockMoreThanOnce) { FileLock lock_one(file_path_); FileLock lock_two(file_path_); ASSERT_TRUE(lock_one.TryLock()); EXPECT_FALSE(lock_two.TryLock()); } TEST_F(FileLockTest, LockReleasedWhenThreadCrash) { pid_t pid = fork(); if (pid == 0) { FileLock lock(file_path_); if (!lock.TryLock()) { _exit(1); } std::cout << "Lock acquired successfully."; kill(getpid(), SIGKILL); } int wstatus; int w = waitpid(pid, &wstatus, WUNTRACED); ASSERT_NE(w, -1); FileLock lock_two(file_path_); EXPECT_TRUE(lock_two.TryLock()); }
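A minimal usage sketch of the lock above, assuming only what the implementation and tests show: FileLock is constructed from a file path and TryLock() returns true on success. The lock-file path and program structure here are hypothetical.

#include <iostream>

#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"

int main() {
  // Hypothetical lock path; any writable location works for flock(2).
  tflite::acceleration::FileLock lock("/tmp/minibenchmark.lock");
  if (!lock.TryLock()) {
    std::cerr << "Another process holds the lock; exiting.\n";
    return 1;
  }
  // Critical section: only one process at a time reaches this point.
  return 0;  // The flock(2) lock is released when the process exits.
}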
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h" #include <atomic> #include <csignal> #include <functional> #include <memory> #include <utility> #include "absl/synchronization/notification.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/mutex.h" #include "tsl/platform/statusor.h" #if defined(PLATFORM_GOOGLE) #include "thread/executor.h" #include "thread/signal.h" #endif namespace tsl { namespace { constexpr absl::Duration kListenInterval = absl::Seconds(1); constexpr absl::Time kUnsetDeathTime = absl::InfinitePast(); static std::atomic_bool sigterm_received(false); class SigtermNotifier : public PreemptionNotifier { public: explicit SigtermNotifier(Env* env); ~SigtermNotifier() override { shutdown_notification_.Notify(); } private: void StartListenerThread(); absl::Notification shutdown_notification_; std::unique_ptr<Thread> preempt_listener_thread_; }; SigtermNotifier::SigtermNotifier(Env* env) : PreemptionNotifier(env) { sigterm_received.store(false); StartListenerThread(); #if defined(PLATFORM_GOOGLE) thread::signal::Token unused_token; thread::signal::AddHandler( SIGTERM, thread::Executor::DefaultExecutor(), []() { sigterm_received.store(true); }, 0, &unused_token); #else std::signal(SIGTERM, [](int signal) { sigterm_received.store(true); }); #endif } void SigtermNotifier::StartListenerThread() { preempt_listener_thread_.reset( GetEnv()->StartThread({}, "PreemptionNotifier_Listen", [this]() { while (!sigterm_received.load()) { if (shutdown_notification_.WaitForNotificationWithTimeout( kListenInterval)) { NotifyRegisteredListeners( errors::Cancelled("Preemption notifier is being deleted.")); return; } } const absl::Time death_time = absl::Now(); LOG(WARNING) << "SIGTERM caught at " << death_time; NotifyRegisteredListeners(death_time); })); } } absl::StatusOr<absl::Time> PreemptionNotifier::WillBePreemptedAt() { absl::Notification n; absl::StatusOr<absl::Time> result; WillBePreemptedAtAsync( [&n, &result](absl::StatusOr<absl::Time> async_result) { result = async_result; n.Notify(); }); n.WaitForNotification(); return result; } void PreemptionNotifier::WillBePreemptedAtAsync(PreemptTimeCallback callback) { mutex_lock l(mu_); if (death_time_ == kUnsetDeathTime) { callbacks_.push_back(std::move(callback)); } else { callback(death_time_); } } void PreemptionNotifier::NotifyRegisteredListeners( absl::StatusOr<absl::Time> death_time) { mutex_lock l(mu_); if (death_time.ok()) { death_time_ = death_time.value(); } for (const auto& callback : callbacks_) { callback(death_time); } callbacks_.clear(); } REGISTER_PREEMPTION_NOTIFIER( "sigterm", [](Env* env) -> std::unique_ptr<PreemptionNotifier> { return std::make_unique<SigtermNotifier>(env); }); }
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h" #include <csignal> #include <functional> #include <memory> #include <utility> #include "absl/synchronization/notification.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #if defined(PLATFORM_GOOGLE) #include "thread/executor.h" #include "thread/signal.h" #endif namespace tsl { namespace { class PreemptNotifierTest : public ::testing::Test { public: PreemptNotifierTest() { #if defined(PLATFORM_GOOGLE) thread::signal::Token unused_token; thread::signal::AddHandler( SIGTERM, thread::Executor::DefaultExecutor(), []() {}, thread::signal::kOverrideDefault, &unused_token); #endif } }; TEST_F(PreemptNotifierTest, WillBePreemptedAt) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); absl::Time start_time = absl::Now(); env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)), []() { std::raise(SIGTERM); }); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); absl::Duration time_diff = preempt_time - start_time; EXPECT_GT(time_diff, absl::Seconds(1.0)); EXPECT_LT(time_diff, absl::Seconds(3)); } TEST_F(PreemptNotifierTest, WillBePreemptedAt_AlreadyPreempted_ReturnsImmediately) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); absl::Time start_time = absl::Now(); std::raise(SIGTERM); env->SleepForMicroseconds(absl::ToInt64Microseconds(absl::Seconds(2))); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); absl::Duration time_diff = preempt_time - start_time; EXPECT_GT(time_diff, absl::ZeroDuration()); EXPECT_LT(time_diff, absl::Seconds(2)); } TEST_F(PreemptNotifierTest, WillBePreemptedAtAsync_SameResultForAllCallbacks) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)), []() { std::raise(SIGTERM); }); absl::StatusOr<absl::Time> preempt_time; absl::StatusOr<absl::Time> preempt_time_2; absl::Notification n; absl::Notification n_2; preempt_notifier->WillBePreemptedAtAsync( [&preempt_time, &n](absl::StatusOr<absl::Time> result) { preempt_time = result; n.Notify(); }); preempt_notifier->WillBePreemptedAtAsync( [&preempt_time_2, &n_2](absl::StatusOr<absl::Time> result) { preempt_time_2 = result; n_2.Notify(); }); n.WaitForNotification(); n_2.WaitForNotification(); TF_CHECK_OK(preempt_time.status()); TF_CHECK_OK(preempt_time_2.status()); EXPECT_EQ(preempt_time.value(), preempt_time_2.value()); } TEST_F(PreemptNotifierTest, Reset_TwoDifferentPreemptTimesRecorded) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); std::raise(SIGTERM); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); std::raise(SIGTERM); absl::Time preempt_time_2 = 
preempt_notifier->WillBePreemptedAt().value(); EXPECT_NE(preempt_time, preempt_time_2); } TEST_F(PreemptNotifierTest, DestructorCancelsPendingCalls) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); absl::StatusOr<absl::Time> result; absl::Notification n; preempt_notifier->WillBePreemptedAtAsync( [&result, &n](absl::StatusOr<absl::Time> status_or_time) { result = status_or_time; n.Notify(); }); preempt_notifier = nullptr; n.WaitForNotification(); EXPECT_TRUE(errors::IsCancelled(result.status())); } } }
void PreemptionNotifier::NotifyRegisteredListeners( absl::StatusOr<absl::Time> death_time) { mutex_lock l(mu_); if (death_time.ok()) { death_time_ = death_time.value(); } for (const auto& callback : callbacks_) { callback(death_time); } callbacks_.clear(); }
TEST_F(PreemptNotifierTest, WillBePreemptedAt) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); absl::Time start_time = absl::Now(); env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)), []() { std::raise(SIGTERM); }); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); absl::Duration time_diff = preempt_time - start_time; EXPECT_GT(time_diff, absl::Seconds(1.0)); EXPECT_LT(time_diff, absl::Seconds(3)); } TEST_F(PreemptNotifierTest, WillBePreemptedAt_AlreadyPreempted_ReturnsImmediately) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); absl::Time start_time = absl::Now(); std::raise(SIGTERM); env->SleepForMicroseconds(absl::ToInt64Microseconds(absl::Seconds(2))); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); absl::Duration time_diff = preempt_time - start_time; EXPECT_GT(time_diff, absl::ZeroDuration()); EXPECT_LT(time_diff, absl::Seconds(2)); } TEST_F(PreemptNotifierTest, WillBePreemptedAtAsync_SameResultForAllCallbacks) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)), []() { std::raise(SIGTERM); }); absl::StatusOr<absl::Time> preempt_time; absl::StatusOr<absl::Time> preempt_time_2; absl::Notification n; absl::Notification n_2; preempt_notifier->WillBePreemptedAtAsync( [&preempt_time, &n](absl::StatusOr<absl::Time> result) { preempt_time = result; n.Notify(); }); preempt_notifier->WillBePreemptedAtAsync( [&preempt_time_2, &n_2](absl::StatusOr<absl::Time> result) { preempt_time_2 = result; n_2.Notify(); }); n.WaitForNotification(); n_2.WaitForNotification(); TF_CHECK_OK(preempt_time.status()); TF_CHECK_OK(preempt_time_2.status()); EXPECT_EQ(preempt_time.value(), preempt_time_2.value()); } TEST_F(PreemptNotifierTest, Reset_TwoDifferentPreemptTimesRecorded) { auto env = Env::Default(); std::unique_ptr<PreemptionNotifier> preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); std::raise(SIGTERM); absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt(); TF_CHECK_OK(result.status()); absl::Time preempt_time = result.value(); preempt_notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); std::raise(SIGTERM); absl::Time preempt_time_2 = preempt_notifier->WillBePreemptedAt().value(); EXPECT_NE(preempt_time, preempt_time_2); }
#ifndef TENSORFLOW_TSL_LIB_GTL_MAP_UTIL_H_
#define TENSORFLOW_TSL_LIB_GTL_MAP_UTIL_H_

#include <stddef.h>

#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>

#include "tsl/lib/gtl/subtle/map_traits.h"

namespace tsl {
namespace gtl {

// Returns a pointer to the mapped value, or nullptr if the key is absent.
template <class Collection>
const typename Collection::value_type::second_type* FindOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  typename Collection::const_iterator it = collection.find(key);
  if (it == collection.end()) {
    return nullptr;
  }
  return &it->second;
}

template <class Collection>
typename Collection::value_type::second_type* FindOrNull(
    Collection& collection,
    const typename Collection::value_type::first_type& key) {
  typename Collection::iterator it = collection.find(key);
  if (it == collection.end()) {
    return nullptr;
  }
  return &it->second;
}

// For maps whose mapped type is a pointer: returns the pointer value, or a
// default-constructed (null) pointer if the key is absent.
template <class Collection>
typename Collection::value_type::second_type FindPtrOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  typename Collection::const_iterator it = collection.find(key);
  if (it == collection.end()) {
    return typename Collection::value_type::second_type();
  }
  return it->second;
}

template <class Collection>
const typename Collection::value_type::second_type& FindWithDefault(
    const Collection& collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  typename Collection::const_iterator it = collection.find(key);
  if (it == collection.end()) {
    return value;
  }
  return it->second;
}

// Inserts the pair; if the key already exists, overwrites the mapped value
// and returns false.
template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type& vt) {
  std::pair<typename Collection::iterator, bool> ret = collection->insert(vt);
  if (!ret.second) {
    ret.first->second = vt.second;
    return false;
  }
  return true;
}

template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type::first_type& key,
                    const typename Collection::value_type::second_type& value) {
  return InsertOrUpdate(collection,
                        typename Collection::value_type(key, value));
}

template <class Collection>
bool InsertIfNotPresent(Collection* const collection,
                        const typename Collection::value_type& vt) {
  return collection->insert(vt).second;
}

template <class Collection>
bool InsertIfNotPresent(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  return InsertIfNotPresent(collection,
                            typename Collection::value_type(key, value));
}

template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection, const typename Collection::value_type& vt) {
  return collection->insert(vt).first->second;
}

template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  return LookupOrInsert(collection,
                        typename Collection::value_type(key, value));
}

// Builds the inverse mapping of m in *reverse; returns true iff all mapped
// values in m were unique.
template <typename M, typename ReverseM>
bool ReverseMap(const M& m, ReverseM* reverse) {
  bool all_unique = true;
  for (const auto& kv : m) {
    if (!InsertOrUpdate(reverse, kv.second, kv.first)) {
      all_unique = false;
    }
  }
  return all_unique;
}

template <typename ReverseM, typename M>
ReverseM ReverseMap(const M& m) {
  typename std::remove_const<ReverseM>::type reverse;
  ReverseMap(m, &reverse);
  return reverse;
}

// Erases key from the collection and returns the mapped value (intended for
// maps of pointers; returns nullptr if the key is absent).
template <class Collection>
typename Collection::value_type::second_type EraseKeyReturnValuePtr(
    Collection* collection,
    const typename Collection::value_type::first_type& key) {
  auto it = collection->find(key);
  if (it == collection->end()) return nullptr;
  auto v = gtl::subtle::GetMapped(*it);
  collection->erase(it);
  return v;
}

}  // namespace gtl
}  // namespace tsl

#endif  // TENSORFLOW_TSL_LIB_GTL_MAP_UTIL_H_
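For orientation, a small self-contained sketch exercising the helpers above with a std::map; the map name and values are illustrative.

#include <map>
#include <string>

void MapUtilExample() {
  std::map<std::string, int> counts;
  tsl::gtl::InsertIfNotPresent(&counts, "apples", 3);  // inserts; returns true
  tsl::gtl::InsertOrUpdate(&counts, "apples", 5);      // overwrites; returns false
  if (const int* v = tsl::gtl::FindOrNull(counts, "apples")) {
    // *v == 5 here; FindOrNull returns nullptr for absent keys.
  }
  // Inserts "pears" -> 1 and returns a reference to the stored value.
  int& pears = tsl::gtl::LookupOrInsert(&counts, "pears", 1);
  (void)pears;
}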
#include "tsl/lib/gtl/map_util.h" #include <map> #include <set> #include <string> #include "tsl/platform/test.h" #include "tsl/platform/types.h" namespace tsl { TEST(MapUtil, Find) { typedef std::map<string, string> Map; Map m; EXPECT_EQ("", gtl::FindWithDefault(m, "foo", "")); m["foo"] = "bar"; EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", "")); EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo")); EXPECT_TRUE(m.count("foo") > 0); EXPECT_EQ(m["foo"], "bar"); } TEST(MapUtil, LookupOrInsert) { typedef std::map<string, string> Map; Map m; EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz")); EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc")); } TEST(MapUtil, InsertIfNotPresent) { typedef std::set<int> Set; Set s; EXPECT_TRUE(gtl::InsertIfNotPresent(&s, 0)); EXPECT_EQ(s.count(0), 1); EXPECT_FALSE(gtl::InsertIfNotPresent(&s, 0)); EXPECT_EQ(s.count(0), 1); } }
template <class Collection> typename Collection::value_type::second_type& LookupOrInsert( Collection* const collection, const typename Collection::value_type::first_type& key, const typename Collection::value_type::second_type& value) { return LookupOrInsert(collection, typename Collection::value_type(key, value)); }
TEST(MapUtil, LookupOrInsert) { typedef std::map<string, string> Map; Map m; EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz")); EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc")); }
#include "absl/strings/internal/cordz_sample_token.h" #include "absl/base/config.h" #include "absl/strings/internal/cordz_handle.h" #include "absl/strings/internal/cordz_info.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { CordzSampleToken::Iterator& CordzSampleToken::Iterator::operator++() { if (current_) { current_ = current_->Next(*token_); } return *this; } CordzSampleToken::Iterator CordzSampleToken::Iterator::operator++(int) { Iterator it(*this); operator++(); return it; } bool operator==(const CordzSampleToken::Iterator& lhs, const CordzSampleToken::Iterator& rhs) { return lhs.current_ == rhs.current_ && (lhs.current_ == nullptr || lhs.token_ == rhs.token_); } bool operator!=(const CordzSampleToken::Iterator& lhs, const CordzSampleToken::Iterator& rhs) { return !(lhs == rhs); } CordzSampleToken::Iterator::reference CordzSampleToken::Iterator::operator*() const { return *current_; } CordzSampleToken::Iterator::pointer CordzSampleToken::Iterator::operator->() const { return current_; } CordzSampleToken::Iterator::Iterator(const CordzSampleToken* token) : token_(token), current_(CordzInfo::Head(*token)) {} } ABSL_NAMESPACE_END }
#include "absl/strings/internal/cordz_sample_token.h" #include <memory> #include <type_traits> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/memory/memory.h" #include "absl/random/random.h" #include "absl/strings/cordz_test_helpers.h" #include "absl/strings/internal/cord_rep_flat.h" #include "absl/strings/internal/cordz_handle.h" #include "absl/strings/internal/cordz_info.h" #include "absl/synchronization/internal/thread_pool.h" #include "absl/synchronization/notification.h" #include "absl/time/clock.h" #include "absl/time/time.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { namespace { using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Ne; auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString; TEST(CordzSampleTokenTest, IteratorTraits) { static_assert(std::is_copy_constructible<CordzSampleToken::Iterator>::value, ""); static_assert(std::is_copy_assignable<CordzSampleToken::Iterator>::value, ""); static_assert(std::is_move_constructible<CordzSampleToken::Iterator>::value, ""); static_assert(std::is_move_assignable<CordzSampleToken::Iterator>::value, ""); static_assert( std::is_same< std::iterator_traits<CordzSampleToken::Iterator>::iterator_category, std::input_iterator_tag>::value, ""); static_assert( std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::value_type, const CordzInfo&>::value, ""); static_assert( std::is_same< std::iterator_traits<CordzSampleToken::Iterator>::difference_type, ptrdiff_t>::value, ""); static_assert( std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::pointer, const CordzInfo*>::value, ""); static_assert( std::is_same<std::iterator_traits<CordzSampleToken::Iterator>::reference, const CordzInfo&>::value, ""); } TEST(CordzSampleTokenTest, IteratorEmpty) { CordzSampleToken token; EXPECT_THAT(token.begin(), Eq(token.end())); } TEST(CordzSampleTokenTest, Iterator) { TestCordData cord1, cord2, cord3; CordzInfo::TrackCord(cord1.data, kTrackCordMethod, 1); CordzInfo* info1 = cord1.data.cordz_info(); CordzInfo::TrackCord(cord2.data, kTrackCordMethod, 1); CordzInfo* info2 = cord2.data.cordz_info(); CordzInfo::TrackCord(cord3.data, kTrackCordMethod, 1); CordzInfo* info3 = cord3.data.cordz_info(); CordzSampleToken token; std::vector<const CordzInfo*> found; for (const CordzInfo& cord_info : token) { found.push_back(&cord_info); } EXPECT_THAT(found, ElementsAre(info3, info2, info1)); info1->Untrack(); info2->Untrack(); info3->Untrack(); } TEST(CordzSampleTokenTest, IteratorEquality) { TestCordData cord1; TestCordData cord2; TestCordData cord3; CordzInfo::TrackCord(cord1.data, kTrackCordMethod, 1); CordzInfo* info1 = cord1.data.cordz_info(); CordzSampleToken token1; CordzSampleToken::Iterator lhs = token1.begin(); CordzInfo::TrackCord(cord2.data, kTrackCordMethod, 1); CordzInfo* info2 = cord2.data.cordz_info(); CordzSampleToken token2; CordzSampleToken::Iterator rhs = token2.begin(); CordzInfo::TrackCord(cord3.data, kTrackCordMethod, 1); CordzInfo* info3 = cord3.data.cordz_info(); EXPECT_THAT(lhs, Ne(rhs)); rhs++; EXPECT_THAT(lhs, Ne(rhs)); lhs++; rhs++; EXPECT_THAT(lhs, Eq(rhs)); info1->Untrack(); info2->Untrack(); info3->Untrack(); } TEST(CordzSampleTokenTest, MultiThreaded) { Notification stop; static constexpr int kNumThreads = 4; static constexpr int kNumCords = 3; static constexpr int kNumTokens = 3; absl::synchronization_internal::ThreadPool pool(kNumThreads); for (int i = 0; i < kNumThreads; ++i) { pool.Schedule([&stop]() { absl::BitGen gen; TestCordData 
cords[kNumCords]; std::unique_ptr<CordzSampleToken> tokens[kNumTokens]; while (!stop.HasBeenNotified()) { int index = absl::Uniform(gen, 0, kNumCords); if (absl::Bernoulli(gen, 0.5)) { TestCordData& cord = cords[index]; if (cord.data.is_profiled()) { cord.data.cordz_info()->Untrack(); cord.data.clear_cordz_info(); } else { CordzInfo::TrackCord(cord.data, kTrackCordMethod, 1); } } else { std::unique_ptr<CordzSampleToken>& token = tokens[index]; if (token) { if (absl::Bernoulli(gen, 0.5)) { for (const CordzInfo& info : *token) { EXPECT_THAT(info.Next(*token), Ne(&info)); } } else { token = nullptr; } } else { token = absl::make_unique<CordzSampleToken>(); } } } for (TestCordData& cord : cords) { CordzInfo::MaybeUntrackCord(cord.data.cordz_info()); } }); } absl::SleepFor(absl::Seconds(3)); stop.Notify(); } } } ABSL_NAMESPACE_END }
#include "absl/strings/internal/cordz_sample_token.h" #include "absl/base/config.h" #include "absl/strings/internal/cordz_handle.h" #include "absl/strings/internal/cordz_info.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { CordzSampleToken::Iterator& CordzSampleToken::Iterator::operator++() { if (current_) { current_ = current_->Next(*token_); } return *this; }
TEST(CordzSampleTokenTest, Iterator) { TestCordData cord1, cord2, cord3; CordzInfo::TrackCord(cord1.data, kTrackCordMethod, 1); CordzInfo* info1 = cord1.data.cordz_info(); CordzInfo::TrackCord(cord2.data, kTrackCordMethod, 1); CordzInfo* info2 = cord2.data.cordz_info(); CordzInfo::TrackCord(cord3.data, kTrackCordMethod, 1); CordzInfo* info3 = cord3.data.cordz_info(); CordzSampleToken token; std::vector<const CordzInfo*> found; for (const CordzInfo& cord_info : token) { found.push_back(&cord_info); } EXPECT_THAT(found, ElementsAre(info3, info2, info1)); info1->Untrack(); info2->Untrack(); info3->Untrack(); }
#include "quiche/quic/tools/quic_simple_server_stream.h" #include <algorithm> #include <cstdint> #include <list> #include <optional> #include <string> #include <utility> #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/http/quic_spdy_stream.h" #include "quiche/quic/core/http/spdy_utils.h" #include "quiche/quic/core/http/web_transport_http3.h" #include "quiche/quic/core/quic_error_codes.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/tools/quic_simple_server_session.h" #include "quiche/spdy/core/spdy_protocol.h" using spdy::Http2HeaderBlock; namespace quic { QuicSimpleServerStream::QuicSimpleServerStream( QuicStreamId id, QuicSpdySession* session, StreamType type, QuicSimpleServerBackend* quic_simple_server_backend) : QuicSpdyServerStreamBase(id, session, type), content_length_(-1), generate_bytes_length_(0), quic_simple_server_backend_(quic_simple_server_backend) { QUICHE_DCHECK(quic_simple_server_backend_); } QuicSimpleServerStream::QuicSimpleServerStream( PendingStream* pending, QuicSpdySession* session, QuicSimpleServerBackend* quic_simple_server_backend) : QuicSpdyServerStreamBase(pending, session), content_length_(-1), generate_bytes_length_(0), quic_simple_server_backend_(quic_simple_server_backend) { QUICHE_DCHECK(quic_simple_server_backend_); } QuicSimpleServerStream::~QuicSimpleServerStream() { quic_simple_server_backend_->CloseBackendResponseStream(this); } void QuicSimpleServerStream::OnInitialHeadersComplete( bool fin, size_t frame_len, const QuicHeaderList& header_list) { QuicSpdyStream::OnInitialHeadersComplete(fin, frame_len, header_list); if (!response_sent_ && !SpdyUtils::CopyAndValidateHeaders(header_list, &content_length_, &request_headers_)) { QUIC_DVLOG(1) << "Invalid headers"; SendErrorResponse(); } ConsumeHeaderList(); if (!fin && !response_sent_ && IsConnectRequest()) { if (quic_simple_server_backend_ == nullptr) { QUIC_DVLOG(1) << "Backend is missing on CONNECT headers."; SendErrorResponse(); return; } if (web_transport() != nullptr) { QuicSimpleServerBackend::WebTransportResponse response = quic_simple_server_backend_->ProcessWebTransportRequest( request_headers_, web_transport()); if (response.response_headers[":status"] == "200") { WriteHeaders(std::move(response.response_headers), false, nullptr); if (response.visitor != nullptr) { web_transport()->SetVisitor(std::move(response.visitor)); } web_transport()->HeadersReceived(request_headers_); } else { WriteHeaders(std::move(response.response_headers), true, nullptr); } return; } quic_simple_server_backend_->HandleConnectHeaders(request_headers_, this); } } void QuicSimpleServerStream::OnBodyAvailable() { while (HasBytesToRead()) { struct iovec iov; if (GetReadableRegions(&iov, 1) == 0) { break; } QUIC_DVLOG(1) << "Stream " << id() << " processed " << iov.iov_len << " bytes."; body_.append(static_cast<char*>(iov.iov_base), iov.iov_len); if (content_length_ >= 0 && body_.size() > static_cast<uint64_t>(content_length_)) { QUIC_DVLOG(1) << "Body size (" << body_.size() << ") > content length (" << content_length_ << ")."; SendErrorResponse(); return; } MarkConsumed(iov.iov_len); } if (!sequencer()->IsClosed()) { if (IsConnectRequest()) { HandleRequestConnectData(false); } sequencer()->SetUnblocked(); return; } OnFinRead(); if (write_side_closed() || 
fin_buffered()) { return; } if (IsConnectRequest()) { HandleRequestConnectData(true); } else { SendResponse(); } } void QuicSimpleServerStream::HandleRequestConnectData(bool fin_received) { QUICHE_DCHECK(IsConnectRequest()); if (quic_simple_server_backend_ == nullptr) { QUIC_DVLOG(1) << "Backend is missing on CONNECT data."; ResetWriteSide( QuicResetStreamError::FromInternal(QUIC_STREAM_CONNECT_ERROR)); return; } std::string data = std::move(body_); body_.clear(); quic_simple_server_backend_->HandleConnectData(data, fin_received, this); } void QuicSimpleServerStream::SendResponse() { QUICHE_DCHECK(!IsConnectRequest()); if (request_headers_.empty()) { QUIC_DVLOG(1) << "Request headers empty."; SendErrorResponse(); return; } if (content_length_ > 0 && static_cast<uint64_t>(content_length_) != body_.size()) { QUIC_DVLOG(1) << "Content length (" << content_length_ << ") != body size (" << body_.size() << ")."; SendErrorResponse(); return; } if (!request_headers_.contains(":authority")) { QUIC_DVLOG(1) << "Request headers do not contain :authority."; SendErrorResponse(); return; } if (!request_headers_.contains(":path")) { QUIC_DVLOG(1) << "Request headers do not contain :path."; SendErrorResponse(); return; } if (quic_simple_server_backend_ == nullptr) { QUIC_DVLOG(1) << "Backend is missing in SendResponse()."; SendErrorResponse(); return; } if (web_transport() != nullptr) { QuicSimpleServerBackend::WebTransportResponse response = quic_simple_server_backend_->ProcessWebTransportRequest( request_headers_, web_transport()); if (response.response_headers[":status"] == "200") { WriteHeaders(std::move(response.response_headers), false, nullptr); if (response.visitor != nullptr) { web_transport()->SetVisitor(std::move(response.visitor)); } web_transport()->HeadersReceived(request_headers_); } else { WriteHeaders(std::move(response.response_headers), true, nullptr); } return; } quic_simple_server_backend_->FetchResponseFromBackend(request_headers_, body_, this); } QuicConnectionId QuicSimpleServerStream::connection_id() const { return spdy_session()->connection_id(); } QuicStreamId QuicSimpleServerStream::stream_id() const { return id(); } std::string QuicSimpleServerStream::peer_host() const { return spdy_session()->peer_address().host().ToString(); } QuicSpdyStream* QuicSimpleServerStream::GetStream() { return this; } namespace { class DelayedResponseAlarm : public QuicAlarm::DelegateWithContext { public: DelayedResponseAlarm(QuicSimpleServerStream* stream, const QuicBackendResponse* response) : QuicAlarm::DelegateWithContext( stream->spdy_session()->connection()->context()), stream_(stream), response_(response) { stream_ = stream; response_ = response; } ~DelayedResponseAlarm() override = default; void OnAlarm() override { stream_->Respond(response_); } private: QuicSimpleServerStream* stream_; const QuicBackendResponse* response_; }; } void QuicSimpleServerStream::OnResponseBackendComplete( const QuicBackendResponse* response) { if (response == nullptr) { QUIC_DVLOG(1) << "Response not found in cache."; SendNotFoundResponse(); return; } auto delay = response->delay(); if (delay.IsZero()) { Respond(response); return; } auto* connection = session()->connection(); delayed_response_alarm_.reset(connection->alarm_factory()->CreateAlarm( new DelayedResponseAlarm(this, response))); delayed_response_alarm_->Set(connection->clock()->Now() + delay); } void QuicSimpleServerStream::Respond(const QuicBackendResponse* response) { for (const auto& headers : response->early_hints()) { QUIC_DVLOG(1) << "Stream " 
<< id() << " sending an Early Hints response: " << headers.DebugString(); WriteHeaders(headers.Clone(), false, nullptr); } if (response->response_type() == QuicBackendResponse::CLOSE_CONNECTION) { QUIC_DVLOG(1) << "Special response: closing connection."; OnUnrecoverableError(QUIC_NO_ERROR, "Toy server forcing close"); return; } if (response->response_type() == QuicBackendResponse::IGNORE_REQUEST) { QUIC_DVLOG(1) << "Special response: ignoring request."; return; } if (response->response_type() == QuicBackendResponse::BACKEND_ERR_RESPONSE) { QUIC_DVLOG(1) << "Quic Proxy: Backend connection error."; SendErrorResponse(502); return; } std::string request_url = request_headers_[":authority"].as_string() + request_headers_[":path"].as_string(); int response_code; const Http2HeaderBlock& response_headers = response->headers(); if (!ParseHeaderStatusCode(response_headers, &response_code)) { auto status = response_headers.find(":status"); if (status == response_headers.end()) { QUIC_LOG(WARNING) << ":status not present in response from cache for request " << request_url; } else { QUIC_LOG(WARNING) << "Illegal (non-integer) response :status from cache: " << status->second << " for request " << request_url; } SendErrorResponse(); return; } if (response->response_type() == QuicBackendResponse::INCOMPLETE_RESPONSE) { QUIC_DVLOG(1) << "Stream " << id() << " sending an incomplete response, i.e. no trailer, no fin."; SendIncompleteResponse(response->headers().Clone(), response->body()); return; } if (response->response_type() == QuicBackendResponse::GENERATE_BYTES) { QUIC_DVLOG(1) << "Stream " << id() << " sending a generate bytes response."; std::string path = request_headers_[":path"].as_string().substr(1); if (!absl::SimpleAtoi(path, &generate_bytes_length_)) { QUIC_LOG(ERROR) << "Path is not a number."; SendNotFoundResponse(); return; } Http2HeaderBlock headers = response->headers().Clone(); headers["content-length"] = absl::StrCat(generate_bytes_length_); WriteHeaders(std::move(headers), false, nullptr); QUICHE_DCHECK(!response_sent_); response_sent_ = true; WriteGeneratedBytes(); return; } QUIC_DVLOG(1) << "Stream " << id() << " sending response."; SendHeadersAndBodyAndTrailers(response->headers().Clone(), response->body(), response->trailers().Clone()); } void QuicSimpleServerStream::SendStreamData(absl::string_view data, bool close_stream) { QUICHE_DCHECK(!data.empty() || close_stream); if (close_stream) { SendHeadersAndBodyAndTrailers( std::nullopt, data, spdy::Http2HeaderBlock()); } else { SendIncompleteResponse(std::nullopt, data); } } void QuicSimpleServerStream::TerminateStreamWithError( QuicResetStreamError error) { QUIC_DVLOG(1) << "Stream " << id() << " abruptly terminating with error " << error.internal_code(); ResetWriteSide(error); } void QuicSimpleServerStream::OnCanWrite() { QuicSpdyStream::OnCanWrite(); WriteGeneratedBytes(); } void QuicSimpleServerStream::WriteGeneratedBytes() { static size_t kChunkSize = 1024; while (!HasBufferedData() && generate_bytes_length_ > 0) { size_t len = std::min<size_t>(kChunkSize, generate_bytes_length_); std::string data(len, 'a'); generate_bytes_length_ -= len; bool fin = generate_bytes_length_ == 0; WriteOrBufferBody(data, fin); } } void QuicSimpleServerStream::SendNotFoundResponse() { QUIC_DVLOG(1) << "Stream " << id() << " sending not found response."; Http2HeaderBlock headers; headers[":status"] = "404"; headers["content-length"] = absl::StrCat(strlen(kNotFoundResponseBody)); SendHeadersAndBody(std::move(headers), kNotFoundResponseBody); } void 
QuicSimpleServerStream::SendErrorResponse() { SendErrorResponse(0); } void QuicSimpleServerStream::SendErrorResponse(int resp_code) { QUIC_DVLOG(1) << "Stream " << id() << " sending error response."; if (!reading_stopped()) { StopReading(); } Http2HeaderBlock headers; if (resp_code <= 0) { headers[":status"] = "500"; } else { headers[":status"] = absl::StrCat(resp_code); } headers["content-length"] = absl::StrCat(strlen(kErrorResponseBody)); SendHeadersAndBody(std::move(headers), kErrorResponseBody); } void QuicSimpleServerStream::SendIncompleteResponse( std::optional<Http2HeaderBlock> response_headers, absl::string_view body) { QUICHE_DCHECK_NE(response_headers.has_value(), response_sent_); if (response_headers.has_value()) { QUIC_DLOG(INFO) << "Stream " << id() << " writing headers (fin = false) : " << response_headers.value().DebugString(); int response_code; if (!ParseHeaderStatusCode(*response_headers, &response_code) || response_code != 100) { response_sent_ = true; } WriteHeaders(std::move(response_headers).value(), false, nullptr); } QUIC_DLOG(INFO) << "Stream " << id() << " writing body (fin = false) with size: " << body.size(); if (!body.empty()) { WriteOrBufferBody(body, false); } } void QuicSimpleServerStream::SendHeadersAndBody( Http2HeaderBlock response_headers, absl::string_view body) { SendHeadersAndBodyAndTrailers(std::move(response_headers), body, Http2HeaderBlock()); } void QuicSimpleServerStream::SendHeadersAndBodyAndTrailers( std::optional<Http2HeaderBlock> response_headers, absl::string_view body, Http2HeaderBlock response_trailers) { QUICHE_DCHECK_NE(response_headers.has_value(), response_sent_); if (response_headers.has_value()) { bool send_fin = (body.empty() && response_trailers.empty()); QUIC_DLOG(INFO) << "Stream " << id() << " writing headers (fin = " << send_fin << ") : " << response_headers.value().DebugString(); WriteHeaders(std::move(response_headers).value(), send_fin, nullptr); response_sent_ = true; if (send_fin) { return; } } bool send_fin = response_trailers.empty(); QUIC_DLOG(INFO) << "Stream " << id() << " writing body (fin = " << send_fin << ") with size: " << body.size(); if (!body.empty() || send_fin) { WriteOrBufferBody(body, send_fin); } if (send_fin) { return; } QUIC_DLOG(INFO) << "Stream " << id() << " writing trailers (fin = true): " << response_trailers.DebugString(); WriteTrailers(std::move(response_trailers), nullptr); } bool QuicSimpleServerStream::IsConnectRequest() const { auto method_it = request_headers_.find(":method"); return method_it != request_headers_.end() && method_it->second == "CONNECT"; } void QuicSimpleServerStream::OnInvalidHeaders() { QUIC_DVLOG(1) << "Invalid headers"; SendErrorResponse(400); } const char* const QuicSimpleServerStream::kErrorResponseBody = "bad"; const char* const QuicSimpleServerStream::kNotFoundResponseBody = "file not found"; }
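A hedged sketch of a trivial backend for the stream above. The overridden interface methods and the RequestHandler::OnResponseBackendComplete() hand-off are those exercised in this file and its test; QuicBackendResponse::set_body() and the fixed 200 response are assumptions made for illustration, not the library's documented contract.

class FixedResponseBackend : public quic::QuicSimpleServerBackend {
 public:
  bool InitializeBackend(const std::string&) override { return true; }
  bool IsBackendInitialized() const override { return true; }

  void FetchResponseFromBackend(const spdy::Http2HeaderBlock& /*headers*/,
                                const std::string& /*body*/,
                                RequestHandler* handler) override {
    spdy::Http2HeaderBlock response_headers;
    response_headers[":status"] = "200";
    response_.set_headers(std::move(response_headers));
    response_.set_body("hello");  // set_body() is assumed here.
    // Hands the response back; the stream's OnResponseBackendComplete()
    // above writes headers and body (possibly after a configured delay).
    // The response must outlive the stream's use of it, hence the member.
    handler->OnResponseBackendComplete(&response_);
  }

  void CloseBackendResponseStream(RequestHandler*) override {}

 private:
  quic::QuicBackendResponse response_;
};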
#include "quiche/quic/tools/quic_simple_server_stream.h" #include <list> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/base/macros.h" #include "absl/memory/memory.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/core/http/http_encoder.h" #include "quiche/quic/core/http/spdy_utils.h" #include "quiche/quic/core/quic_alarm_factory.h" #include "quiche/quic/core/quic_default_clock.h" #include "quiche/quic/core/quic_error_codes.h" #include "quiche/quic/core/quic_types.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_socket_address.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_config_peer.h" #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_session_peer.h" #include "quiche/quic/test_tools/quic_spdy_session_peer.h" #include "quiche/quic/test_tools/quic_stream_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/quic/test_tools/simulator/simulator.h" #include "quiche/quic/tools/quic_backend_response.h" #include "quiche/quic/tools/quic_memory_cache_backend.h" #include "quiche/quic/tools/quic_simple_server_backend.h" #include "quiche/quic/tools/quic_simple_server_session.h" #include "quiche/common/simple_buffer_allocator.h" using testing::_; using testing::AnyNumber; using testing::InSequence; using testing::Invoke; using testing::StrictMock; namespace quic { namespace test { const size_t kFakeFrameLen = 60; const size_t kErrorLength = strlen(QuicSimpleServerStream::kErrorResponseBody); const size_t kDataFrameHeaderLength = 2; class TestStream : public QuicSimpleServerStream { public: TestStream(QuicStreamId stream_id, QuicSpdySession* session, StreamType type, QuicSimpleServerBackend* quic_simple_server_backend) : QuicSimpleServerStream(stream_id, session, type, quic_simple_server_backend) { EXPECT_CALL(*this, WriteOrBufferBody(_, _)) .Times(AnyNumber()) .WillRepeatedly([this](absl::string_view data, bool fin) { this->QuicSimpleServerStream::WriteOrBufferBody(data, fin); }); } ~TestStream() override = default; MOCK_METHOD(void, FireAlarmMock, (), ()); MOCK_METHOD(void, WriteHeadersMock, (bool fin), ()); MOCK_METHOD(void, WriteEarlyHintsHeadersMock, (bool fin), ()); MOCK_METHOD(void, WriteOrBufferBody, (absl::string_view data, bool fin), (override)); size_t WriteHeaders( spdy::Http2HeaderBlock header_block, bool fin, quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface> ) override { if (header_block[":status"] == "103") { WriteEarlyHintsHeadersMock(fin); } else { WriteHeadersMock(fin); } return 0; } void DoSendResponse() { SendResponse(); } void DoSendErrorResponse() { QuicSimpleServerStream::SendErrorResponse(); } spdy::Http2HeaderBlock* mutable_headers() { return &request_headers_; } void set_body(std::string body) { body_ = std::move(body); } const std::string& body() const { return body_; } int content_length() const { return content_length_; } bool send_response_was_called() const { return send_response_was_called_; } bool send_error_response_was_called() const { return send_error_response_was_called_; } absl::string_view GetHeader(absl::string_view key) const { auto it = request_headers_.find(key); QUICHE_DCHECK(it != request_headers_.end()); return it->second; 
} void ReplaceBackend(QuicSimpleServerBackend* backend) { set_quic_simple_server_backend_for_test(backend); } protected: void SendResponse() override { send_response_was_called_ = true; QuicSimpleServerStream::SendResponse(); } void SendErrorResponse(int resp_code) override { send_error_response_was_called_ = true; QuicSimpleServerStream::SendErrorResponse(resp_code); } private: bool send_response_was_called_ = false; bool send_error_response_was_called_ = false; }; namespace { class MockQuicSimpleServerSession : public QuicSimpleServerSession { public: const size_t kMaxStreamsForTest = 100; MockQuicSimpleServerSession( QuicConnection* connection, MockQuicSessionVisitor* owner, MockQuicCryptoServerStreamHelper* helper, QuicCryptoServerConfig* crypto_config, QuicCompressedCertsCache* compressed_certs_cache, QuicSimpleServerBackend* quic_simple_server_backend) : QuicSimpleServerSession(DefaultQuicConfig(), CurrentSupportedVersions(), connection, owner, helper, crypto_config, compressed_certs_cache, quic_simple_server_backend) { if (VersionHasIetfQuicFrames(connection->transport_version())) { QuicSessionPeer::SetMaxOpenIncomingUnidirectionalStreams( this, kMaxStreamsForTest); QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams( this, kMaxStreamsForTest); } else { QuicSessionPeer::SetMaxOpenIncomingStreams(this, kMaxStreamsForTest); QuicSessionPeer::SetMaxOpenOutgoingStreams(this, kMaxStreamsForTest); } ON_CALL(*this, WritevData(_, _, _, _, _, _)) .WillByDefault(Invoke(this, &MockQuicSimpleServerSession::ConsumeData)); } MockQuicSimpleServerSession(const MockQuicSimpleServerSession&) = delete; MockQuicSimpleServerSession& operator=(const MockQuicSimpleServerSession&) = delete; ~MockQuicSimpleServerSession() override = default; MOCK_METHOD(void, OnConnectionClosed, (const QuicConnectionCloseFrame& frame, ConnectionCloseSource source), (override)); MOCK_METHOD(QuicSpdyStream*, CreateIncomingStream, (QuicStreamId id), (override)); MOCK_METHOD(QuicConsumedData, WritevData, (QuicStreamId id, size_t write_length, QuicStreamOffset offset, StreamSendingState state, TransmissionType type, EncryptionLevel level), (override)); MOCK_METHOD(void, OnStreamHeaderList, (QuicStreamId stream_id, bool fin, size_t frame_len, const QuicHeaderList& header_list), (override)); MOCK_METHOD(void, OnStreamHeadersPriority, (QuicStreamId stream_id, const spdy::SpdyStreamPrecedence& precedence), (override)); MOCK_METHOD(void, MaybeSendRstStreamFrame, (QuicStreamId stream_id, QuicResetStreamError error, QuicStreamOffset bytes_written), (override)); MOCK_METHOD(void, MaybeSendStopSendingFrame, (QuicStreamId stream_id, QuicResetStreamError error), (override)); using QuicSession::ActivateStream; QuicConsumedData ConsumeData(QuicStreamId id, size_t write_length, QuicStreamOffset offset, StreamSendingState state, TransmissionType , std::optional<EncryptionLevel> ) { if (write_length > 0) { auto buf = std::make_unique<char[]>(write_length); QuicStream* stream = GetOrCreateStream(id); QUICHE_DCHECK(stream); QuicDataWriter writer(write_length, buf.get(), quiche::HOST_BYTE_ORDER); stream->WriteStreamData(offset, write_length, &writer); } else { QUICHE_DCHECK(state != NO_FIN); } return QuicConsumedData(write_length, state != NO_FIN); } spdy::Http2HeaderBlock original_request_headers_; }; class QuicSimpleServerStreamTest : public QuicTestWithParam<ParsedQuicVersion> { public: QuicSimpleServerStreamTest() : connection_(new StrictMock<MockQuicConnection>( &simulator_, simulator_.GetAlarmFactory(), Perspective::IS_SERVER, 
SupportedVersions(GetParam()))), crypto_config_(new QuicCryptoServerConfig( QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(), crypto_test_utils::ProofSourceForTesting(), KeyExchangeSource::Default())), compressed_certs_cache_( QuicCompressedCertsCache::kQuicCompressedCertsCacheSize), session_(connection_, &session_owner_, &session_helper_, crypto_config_.get(), &compressed_certs_cache_, &memory_cache_backend_), quic_response_(new QuicBackendResponse), body_("hello world") { connection_->set_visitor(&session_); header_list_.OnHeaderBlockStart(); header_list_.OnHeader(":authority", "www.google.com"); header_list_.OnHeader(":path", "/"); header_list_.OnHeader(":method", "POST"); header_list_.OnHeader(":scheme", "https"); header_list_.OnHeader("content-length", "11"); header_list_.OnHeaderBlockEnd(128, 128); session_.config()->SetInitialStreamFlowControlWindowToSend( kInitialStreamFlowControlWindowForTest); session_.config()->SetInitialSessionFlowControlWindowToSend( kInitialSessionFlowControlWindowForTest); session_.Initialize(); connection_->SetEncrypter( quic::ENCRYPTION_FORWARD_SECURE, std::make_unique<quic::NullEncrypter>(connection_->perspective())); if (connection_->version().SupportsAntiAmplificationLimit()) { QuicConnectionPeer::SetAddressValidated(connection_); } stream_ = new StrictMock<TestStream>( GetNthClientInitiatedBidirectionalStreamId( connection_->transport_version(), 0), &session_, BIDIRECTIONAL, &memory_cache_backend_); session_.ActivateStream(absl::WrapUnique(stream_)); QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow( session_.config(), kMinimumFlowControlSendWindow); QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional( session_.config(), kMinimumFlowControlSendWindow); QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional( session_.config(), kMinimumFlowControlSendWindow); QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional( session_.config(), kMinimumFlowControlSendWindow); QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_.config(), 10); session_.OnConfigNegotiated(); simulator_.RunFor(QuicTime::Delta::FromSeconds(1)); } const std::string& StreamBody() { return stream_->body(); } std::string StreamHeadersValue(const std::string& key) { return (*stream_->mutable_headers())[key].as_string(); } bool UsesHttp3() const { return VersionUsesHttp3(connection_->transport_version()); } void ReplaceBackend(std::unique_ptr<QuicSimpleServerBackend> backend) { replacement_backend_ = std::move(backend); stream_->ReplaceBackend(replacement_backend_.get()); } quic::simulator::Simulator simulator_; spdy::Http2HeaderBlock response_headers_; MockQuicConnectionHelper helper_; StrictMock<MockQuicConnection>* connection_; StrictMock<MockQuicSessionVisitor> session_owner_; StrictMock<MockQuicCryptoServerStreamHelper> session_helper_; std::unique_ptr<QuicCryptoServerConfig> crypto_config_; QuicCompressedCertsCache compressed_certs_cache_; QuicMemoryCacheBackend memory_cache_backend_; std::unique_ptr<QuicSimpleServerBackend> replacement_backend_; StrictMock<MockQuicSimpleServerSession> session_; StrictMock<TestStream>* stream_; std::unique_ptr<QuicBackendResponse> quic_response_; std::string body_; QuicHeaderList header_list_; }; INSTANTIATE_TEST_SUITE_P(Tests, QuicSimpleServerStreamTest, ::testing::ValuesIn(AllSupportedVersions()), ::testing::PrintToStringParamName()); TEST_P(QuicSimpleServerStreamTest, TestFraming) { EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, 
&MockQuicSimpleServerSession::ConsumeData)); stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_); quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body_.length(), quiche::SimpleBufferAllocator::Get()); std::string data = UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_; stream_->OnStreamFrame( QuicStreamFrame(stream_->id(), false, 0, data)); EXPECT_EQ("11", StreamHeadersValue("content-length")); EXPECT_EQ("/", StreamHeadersValue(":path")); EXPECT_EQ("POST", StreamHeadersValue(":method")); EXPECT_EQ(body_, StreamBody()); } TEST_P(QuicSimpleServerStreamTest, TestFramingOnePacket) { EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData)); stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_); quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body_.length(), quiche::SimpleBufferAllocator::Get()); std::string data = UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_; stream_->OnStreamFrame( QuicStreamFrame(stream_->id(), false, 0, data)); EXPECT_EQ("11", StreamHeadersValue("content-length")); EXPECT_EQ("/", StreamHeadersValue(":path")); EXPECT_EQ("POST", StreamHeadersValue(":method")); EXPECT_EQ(body_, StreamBody()); } TEST_P(QuicSimpleServerStreamTest, SendQuicRstStreamNoErrorInStopReading) { EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData)); EXPECT_FALSE(stream_->fin_received()); EXPECT_FALSE(stream_->rst_received()); QuicStreamPeer::SetFinSent(stream_); stream_->CloseWriteSide(); if (session_.version().UsesHttp3()) { EXPECT_CALL(session_, MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal( QUIC_STREAM_NO_ERROR))) .Times(1); } else { EXPECT_CALL( session_, MaybeSendRstStreamFrame( _, QuicResetStreamError::FromInternal(QUIC_STREAM_NO_ERROR), _)) .Times(1); } stream_->StopReading(); } TEST_P(QuicSimpleServerStreamTest, TestFramingExtraData) { InSequence seq; std::string large_body = "hello world!!!!!!"; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, kDataFrameHeaderLength, _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _)); stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_); quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body_.length(), quiche::SimpleBufferAllocator::Get()); std::string data = UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_; stream_->OnStreamFrame( QuicStreamFrame(stream_->id(), false, 0, data)); header = HttpEncoder::SerializeDataFrameHeader( large_body.length(), quiche::SimpleBufferAllocator::Get()); std::string data2 = UsesHttp3() ? 
absl::StrCat(header.AsStringView(), large_body) : large_body; stream_->OnStreamFrame( QuicStreamFrame(stream_->id(), true, data.size(), data2)); EXPECT_EQ("11", StreamHeadersValue("content-length")); EXPECT_EQ("/", StreamHeadersValue(":path")); EXPECT_EQ("POST", StreamHeadersValue(":method")); } TEST_P(QuicSimpleServerStreamTest, SendResponseWithIllegalResponseStatus) { spdy::Http2HeaderBlock* request_headers = stream_->mutable_headers(); (*request_headers)[":path"] = "/bar"; (*request_headers)[":authority"] = "www.google.com"; (*request_headers)[":method"] = "GET"; response_headers_[":status"] = "200 OK"; response_headers_["content-length"] = "5"; std::string body = "Yummm"; quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body.length(), quiche::SimpleBufferAllocator::Get()); memory_cache_backend_.AddResponse("www.google.com", "/bar", std::move(response_headers_), body); QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _)); stream_->DoSendResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, SendResponseWithIllegalResponseStatus2) { spdy::Http2HeaderBlock* request_headers = stream_->mutable_headers(); (*request_headers)[":path"] = "/bar"; (*request_headers)[":authority"] = "www.google.com"; (*request_headers)[":method"] = "GET"; response_headers_[":status"] = "+200"; response_headers_["content-length"] = "5"; std::string body = "Yummm"; quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body.length(), quiche::SimpleBufferAllocator::Get()); memory_cache_backend_.AddResponse("www.google.com", "/bar", std::move(response_headers_), body); QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _)); stream_->DoSendResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, SendResponseWithValidHeaders) { spdy::Http2HeaderBlock* request_headers = stream_->mutable_headers(); (*request_headers)[":path"] = "/bar"; (*request_headers)[":authority"] = "www.google.com"; (*request_headers)[":method"] = "GET"; response_headers_[":status"] = "200"; response_headers_["content-length"] = "5"; std::string body = "Yummm"; quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body.length(), quiche::SimpleBufferAllocator::Get()); memory_cache_backend_.AddResponse("www.google.com", "/bar", std::move(response_headers_), body); QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _)); stream_->DoSendResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, SendResponseWithEarlyHints) { std::string host = "www.google.com"; std::string request_path = "/foo"; std::string body = "Yummm"; spdy::Http2HeaderBlock* request_headers = stream_->mutable_headers(); (*request_headers)[":path"] = request_path; 
(*request_headers)[":authority"] = host; (*request_headers)[":method"] = "GET"; quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body.length(), quiche::SimpleBufferAllocator::Get()); std::vector<spdy::Http2HeaderBlock> early_hints; const size_t kNumEarlyHintsResponses = 2; for (size_t i = 0; i < kNumEarlyHintsResponses; ++i) { spdy::Http2HeaderBlock hints; hints["link"] = "</image.png>; rel=preload; as=image"; early_hints.push_back(std::move(hints)); } response_headers_[":status"] = "200"; response_headers_["content-length"] = "5"; memory_cache_backend_.AddResponseWithEarlyHints( host, request_path, std::move(response_headers_), body, early_hints); QuicStreamPeer::SetFinReceived(stream_); InSequence s; for (size_t i = 0; i < kNumEarlyHintsResponses; ++i) { EXPECT_CALL(*stream_, WriteEarlyHintsHeadersMock(false)); } EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _)); stream_->DoSendResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } class AlarmTestDelegate : public QuicAlarm::DelegateWithoutContext { public: AlarmTestDelegate(TestStream* stream) : stream_(stream) {} void OnAlarm() override { stream_->FireAlarmMock(); } private: TestStream* stream_; }; TEST_P(QuicSimpleServerStreamTest, SendResponseWithDelay) { spdy::Http2HeaderBlock* request_headers = stream_->mutable_headers(); std::string host = "www.google.com"; std::string path = "/bar"; (*request_headers)[":path"] = path; (*request_headers)[":authority"] = host; (*request_headers)[":method"] = "GET"; response_headers_[":status"] = "200"; response_headers_["content-length"] = "5"; std::string body = "Yummm"; QuicTime::Delta delay = QuicTime::Delta::FromMilliseconds(3000); quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader( body.length(), quiche::SimpleBufferAllocator::Get()); memory_cache_backend_.AddResponse(host, path, std::move(response_headers_), body); auto did_delay_succeed = memory_cache_backend_.SetResponseDelay(host, path, delay); EXPECT_TRUE(did_delay_succeed); auto did_invalid_delay_succeed = memory_cache_backend_.SetResponseDelay(host, "nonsense", delay); EXPECT_FALSE(did_invalid_delay_succeed); std::unique_ptr<QuicAlarm> alarm(connection_->alarm_factory()->CreateAlarm( new AlarmTestDelegate(stream_))); alarm->Set(connection_->clock()->Now() + delay); QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, FireAlarmMock()); EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _)); stream_->DoSendResponse(); simulator_.RunFor(delay); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, TestSendErrorResponse) { QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, kDataFrameHeaderLength, _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _)); stream_->DoSendErrorResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, InvalidMultipleContentLength) { spdy::Http2HeaderBlock request_headers; 
header_list_.OnHeader("content-length", absl::string_view("11\00012", 5)); if (session_.version().UsesHttp3()) { EXPECT_CALL(session_, MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal( QUIC_STREAM_NO_ERROR))); } EXPECT_CALL(*stream_, WriteHeadersMock(false)); EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData)); stream_->OnStreamHeaderList(true, kFakeFrameLen, header_list_); EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->reading_stopped()); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, InvalidLeadingNullContentLength) { spdy::Http2HeaderBlock request_headers; header_list_.OnHeader("content-length", absl::string_view("\00012", 3)); if (session_.version().UsesHttp3()) { EXPECT_CALL(session_, MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal( QUIC_STREAM_NO_ERROR))); } EXPECT_CALL(*stream_, WriteHeadersMock(false)); EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData)); stream_->OnStreamHeaderList(true, kFakeFrameLen, header_list_); EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->reading_stopped()); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, InvalidMultipleContentLengthII) { spdy::Http2HeaderBlock request_headers; header_list_.OnHeader("content-length", absl::string_view("11\00011", 5)); if (session_.version().UsesHttp3()) { EXPECT_CALL(session_, MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal( QUIC_STREAM_NO_ERROR))); EXPECT_CALL(*stream_, WriteHeadersMock(false)); EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly( Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData)); } stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_); if (session_.version().UsesHttp3()) { EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->reading_stopped()); EXPECT_TRUE(stream_->write_side_closed()); } else { EXPECT_EQ(11, stream_->content_length()); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_FALSE(stream_->reading_stopped()); EXPECT_FALSE(stream_->write_side_closed()); } } TEST_P(QuicSimpleServerStreamTest, DoNotSendQuicRstStreamNoErrorWithRstReceived) { EXPECT_FALSE(stream_->reading_stopped()); if (VersionUsesHttp3(connection_->transport_version())) { auto* qpack_decoder_stream = QuicSpdySessionPeer::GetQpackDecoderSendStream(&session_); EXPECT_CALL(session_, WritevData(qpack_decoder_stream->id(), _, _, _, _, _)) .Times(AnyNumber()); } EXPECT_CALL( session_, MaybeSendRstStreamFrame( _, session_.version().UsesHttp3() ? 
QuicResetStreamError::FromInternal(QUIC_STREAM_CANCELLED) : QuicResetStreamError::FromInternal(QUIC_RST_ACKNOWLEDGEMENT), _)) .Times(1); QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(), QUIC_STREAM_CANCELLED, 1234); stream_->OnStreamReset(rst_frame); if (VersionHasIetfQuicFrames(connection_->transport_version())) { EXPECT_CALL(session_owner_, OnStopSendingReceived(_)); QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_->id(), QUIC_STREAM_CANCELLED); session_.OnStopSendingFrame(stop_sending); } EXPECT_TRUE(stream_->reading_stopped()); EXPECT_TRUE(stream_->write_side_closed()); } TEST_P(QuicSimpleServerStreamTest, InvalidHeadersWithFin) { char arr[] = { 0x3a, 0x68, 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x00, 0x00, 0x00, 0x03, 0x47, 0x45, 0x54, 0x00, 0x00, 0x00, 0x05, 0x3a, 0x70, 0x61, 0x74, 0x68, 0x00, 0x00, 0x00, 0x04, 0x2f, 0x66, 0x6f, 0x6f, 0x00, 0x00, 0x00, 0x07, 0x3a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x3a, 0x76, 0x65, 0x72, 0x73, '\x96', 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, 0x2e, 0x31, }; absl::string_view data(arr, ABSL_ARRAYSIZE(arr)); QuicStreamFrame frame(stream_->id(), true, 0, data); stream_->OnStreamFrame(frame); } class TestQuicSimpleServerBackend : public QuicSimpleServerBackend { public: TestQuicSimpleServerBackend() = default; ~TestQuicSimpleServerBackend() override = default; bool InitializeBackend(const std::string& ) override { return true; } bool IsBackendInitialized() const override { return true; } MOCK_METHOD(void, FetchResponseFromBackend, (const spdy::Http2HeaderBlock&, const std::string&, RequestHandler*), (override)); MOCK_METHOD(void, HandleConnectHeaders, (const spdy::Http2HeaderBlock&, RequestHandler*), (override)); MOCK_METHOD(void, HandleConnectData, (absl::string_view, bool, RequestHandler*), (override)); void CloseBackendResponseStream( RequestHandler* ) override {} }; ACTION_P(SendHeadersResponse, response_ptr) { arg1->OnResponseBackendComplete(response_ptr); } ACTION_P(SendStreamData, data, close_stream) { arg2->SendStreamData(data, close_stream); } ACTION_P(TerminateStream, error) { arg1->TerminateStreamWithError(error); } TEST_P(QuicSimpleServerStreamTest, ConnectSendsIntermediateResponses) { auto test_backend = std::make_unique<TestQuicSimpleServerBackend>(); TestQuicSimpleServerBackend* test_backend_ptr = test_backend.get(); ReplaceBackend(std::move(test_backend)); constexpr absl::string_view kRequestBody = "\x11\x11"; spdy::Http2HeaderBlock response_headers; response_headers[":status"] = "200"; QuicBackendResponse headers_response; headers_response.set_headers(response_headers.Clone()); headers_response.set_response_type(QuicBackendResponse::INCOMPLETE_RESPONSE); constexpr absl::string_view kBody1 = "\x22\x22"; constexpr absl::string_view kBody2 = "\x33\x33"; InSequence s; EXPECT_CALL(*test_backend_ptr, HandleConnectHeaders(_, _)) .WillOnce(SendHeadersResponse(&headers_response)); EXPECT_CALL(*stream_, WriteHeadersMock(false)); EXPECT_CALL(*test_backend_ptr, HandleConnectData(kRequestBody, false, _)) .WillOnce(SendStreamData(kBody1, false)); EXPECT_CALL(*stream_, WriteOrBufferBody(kBody1, false)); EXPECT_CALL(*test_backend_ptr, HandleConnectData(kRequestBody, true, _)) .WillOnce(SendStreamData(kBody2, true)); EXPECT_CALL(*stream_, WriteOrBufferBody(kBody2, true)); QuicHeaderList header_list; header_list.OnHeaderBlockStart(); header_list.OnHeader(":authority", 
"www.google.com:4433"); header_list.OnHeader(":method", "CONNECT"); header_list.OnHeaderBlockEnd(128, 128); stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list); quiche::QuicheBuffer h
void QuicSimpleServerStream::SendErrorResponse(int resp_code) { QUIC_DVLOG(1) << "Stream " << id() << " sending error response."; if (!reading_stopped()) { StopReading(); } Http2HeaderBlock headers; if (resp_code <= 0) { headers[":status"] = "500"; } else { headers[":status"] = absl::StrCat(resp_code); } headers["content-length"] = absl::StrCat(strlen(kErrorResponseBody)); SendHeadersAndBody(std::move(headers), kErrorResponseBody); }
TEST_P(QuicSimpleServerStreamTest, TestSendErrorResponse) { QuicStreamPeer::SetFinReceived(stream_); InSequence s; EXPECT_CALL(*stream_, WriteHeadersMock(false)); if (UsesHttp3()) { EXPECT_CALL(session_, WritevData(_, kDataFrameHeaderLength, _, NO_FIN, _, _)); } EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _)); stream_->DoSendErrorResponse(); EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_)); EXPECT_TRUE(stream_->write_side_closed()); }
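A small sketch of the header-construction branch in SendErrorResponse above: non-positive status codes fall back to "500", and content-length is derived from the canned error body. Here std::map stands in for spdy::Http2HeaderBlock and the kErrorResponseBody value is a placeholder; the QUIC send path is elided.

#include <cstring>
#include <iostream>
#include <map>
#include <string>

// Placeholder for the kErrorResponseBody constant used by the stream above.
static const char kErrorResponseBody[] = "bad";

// Mirrors the branch in SendErrorResponse: non-positive codes become "500".
std::map<std::string, std::string> BuildErrorHeaders(int resp_code) {
  std::map<std::string, std::string> headers;
  headers[":status"] =
      resp_code <= 0 ? std::string("500") : std::to_string(resp_code);
  headers["content-length"] = std::to_string(std::strlen(kErrorResponseBody));
  return headers;
}

int main() {
  for (int code : {-1, 0, 404}) {
    auto h = BuildErrorHeaders(code);
    std::cout << code << " -> :status=" << h[":status"]
              << " content-length=" << h["content-length"] << "\n";
  }
}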
#define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #define EIGEN_USE_GPU #endif #include "tensorflow/core/kernels/one_hot_op.h" #include <memory> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/overflow.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T, typename TI> class OneHotOp : public OpKernel { public: explicit OneHotOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_)); } void Compute(OpKernelContext* ctx) override { const Tensor& indices = ctx->input(0); const Tensor& depth = ctx->input(1); const Tensor& on_value = ctx->input(2); const Tensor& off_value = ctx->input(3); const TensorShape& indices_shape = indices.shape(); const int indices_dims = indices_shape.dims(); const int output_dims = indices_dims + 1; OP_REQUIRES( ctx, axis_ == -1 || (axis_ >= 0 && axis_ < output_dims), errors::InvalidArgument("Expected axis to be -1 or between [0, ", output_dims, "). But received: ", axis_)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(depth.shape()), errors::InvalidArgument("depth must be a scalar, but got: ", depth.shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(on_value.shape()), errors::InvalidArgument("on_value must be a scalar, but got: ", on_value.shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(off_value.shape()), errors::InvalidArgument("off_value must be a scalar, but got: ", off_value.shape().DebugString())); const int axis = (axis_ == -1) ? 
indices_dims : axis_; const int32_t depth_v = depth.scalar<int32>()(); OP_REQUIRES( ctx, depth_v >= 0, errors::InvalidArgument("depth must be non-negative, got: ", depth_v)); OP_REQUIRES( ctx, MultiplyWithoutOverflow(indices_shape.num_elements(), depth_v) >= 0, errors::InvalidArgument("OneHot result would have shape ", indices_shape.DebugString(), " + [", depth_v, "], which exceeds 2**63 - 1 elements")); TensorShape output_shape = indices_shape; output_shape.InsertDim(axis, depth_v); auto on_value_t = on_value.scalar<T>(); auto off_value_t = off_value.scalar<T>(); Tensor* output; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output)); if (output_shape.num_elements() > 0) { int64_t prefix_dim_size = 1; for (int i = 0; i < axis; ++i) { prefix_dim_size *= indices_shape.dim_size(i); } int64_t suffix_dim_size = indices_shape.num_elements() / prefix_dim_size; auto indices_t = indices.shaped<TI, 2>({prefix_dim_size, suffix_dim_size}); auto output_t = output->shaped<T, 3>({prefix_dim_size, depth_v, suffix_dim_size}); functor::OneHot<Device, T, TI>::Compute(ctx->eigen_device<Device>(), indices_t, on_value_t, off_value_t, &output_t); } } private: int32 axis_; OneHotOp(const OneHotOp&) = delete; void operator=(const OneHotOp&) = delete; }; #define REGISTER_ONE_HOT_INDEX(type, index_type) \ REGISTER_KERNEL_BUILDER(Name("OneHot") \ .Device(DEVICE_CPU) \ .TypeConstraint<index_type>("TI") \ .TypeConstraint<type>("T") \ .HostMemory("depth"), \ OneHotOp<CPUDevice, type, index_type>); #define REGISTER_ONE_HOT(type) \ REGISTER_ONE_HOT_INDEX(type, uint8); \ REGISTER_ONE_HOT_INDEX(type, int8); \ REGISTER_ONE_HOT_INDEX(type, int32); \ REGISTER_ONE_HOT_INDEX(type, int64_t) TF_CALL_ALL_TYPES(REGISTER_ONE_HOT); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) namespace functor { #define DECLARE_GPU_SPEC_INDEX(T, TI) \ template <> \ void OneHot<GPUDevice, T, TI>::Compute( \ const GPUDevice& d, const typename TTypes<TI>::ConstMatrix& indices, \ const typename TTypes<T>::ConstScalar& on_value, \ const typename TTypes<T>::ConstScalar& off_value, \ typename TTypes<T, 3>::Tensor* output); \ extern template struct OneHot<GPUDevice, T, TI>; #define DECLARE_GPU_SPEC(T) \ DECLARE_GPU_SPEC_INDEX(T, uint8); \ DECLARE_GPU_SPEC_INDEX(T, int8); \ DECLARE_GPU_SPEC_INDEX(T, int32); \ DECLARE_GPU_SPEC_INDEX(T, int64_t); TF_CALL_int8(DECLARE_GPU_SPEC); TF_CALL_int32(DECLARE_GPU_SPEC); TF_CALL_int64(DECLARE_GPU_SPEC); TF_CALL_GPU_ALL_TYPES(DECLARE_GPU_SPEC); #undef DECLARE_GPU_SPEC_INDEX #undef DECLARE_GPU_SPEC } #define REGISTER_ONE_HOT_GPU_INDEX(type, index_type) \ REGISTER_KERNEL_BUILDER(Name("OneHot") \ .Device(DEVICE_GPU) \ .TypeConstraint<index_type>("TI") \ .TypeConstraint<type>("T") \ .HostMemory("depth"), \ OneHotOp<GPUDevice, type, index_type>); #define REGISTER_ONE_HOT_GPU(type) \ REGISTER_ONE_HOT_GPU_INDEX(type, uint8); \ REGISTER_ONE_HOT_GPU_INDEX(type, int8); \ REGISTER_ONE_HOT_GPU_INDEX(type, int32); \ REGISTER_ONE_HOT_GPU_INDEX(type, int64_t); TF_CALL_int8(REGISTER_ONE_HOT_GPU); TF_CALL_int32(REGISTER_ONE_HOT_GPU); TF_CALL_int64(REGISTER_ONE_HOT_GPU); TF_CALL_GPU_ALL_TYPES(REGISTER_ONE_HOT_GPU); #undef REGISTER_ONE_HOT_GPU_INDEX #undef REGISTER_ONE_HOT_GPU #endif }
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* OneHot(int batch_size, int num_classes, int axis) { Graph* g = new Graph(OpRegistry::Global()); Tensor indices(DT_INT32, TensorShape({batch_size})); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, num_classes - 1); auto indices_t = indices.flat<int32>(); for (int i = 0; i < batch_size; ++i) { indices_t(i) = dist(gen); } Tensor depth(DT_INT32, TensorShape({})); depth.scalar<int32>()() = num_classes; Tensor on_value(DT_FLOAT, TensorShape({})); on_value.scalar<float>()() = 1.0f; Tensor off_value(DT_FLOAT, TensorShape({})); off_value.scalar<float>()() = 0.0f; test::graph::Multi(g, "OneHot", { test::graph::Constant(g, indices), test::graph::Constant(g, depth), test::graph::Constant(g, on_value), test::graph::Constant(g, off_value), }) ->AddAttr("axis", axis); return g; } #define BM_OneHot(BATCH, CLASS, AXIS, DEVICE) \ static void BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, OneHot(BATCH, CLASS, AXIS), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \ CLASS); \ } \ BENCHMARK(BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE); BM_OneHot(32, 512, 1, cpu); BM_OneHot(64, 512, 1, cpu); BM_OneHot(128, 512, 1, cpu); BM_OneHot(32, 1024, 1, cpu); BM_OneHot(64, 1024, 1, cpu); BM_OneHot(128, 1024, 1, cpu); BM_OneHot(32, 10000, 1, cpu); BM_OneHot(64, 10000, 1, cpu); BM_OneHot(128, 10000, 1, cpu); BM_OneHot(32, 512, 0, cpu); BM_OneHot(64, 512, 0, cpu); BM_OneHot(128, 512, 0, cpu); BM_OneHot(32, 1024, 0, cpu); BM_OneHot(64, 1024, 0, cpu); BM_OneHot(128, 1024, 0, cpu); BM_OneHot(32, 10000, 0, cpu); BM_OneHot(64, 10000, 0, cpu); BM_OneHot(128, 10000, 0, cpu); }
template <typename Device, typename T, typename TI> class OneHotOp : public OpKernel { public: explicit OneHotOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_)); } private: int32 axis_; };
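OneHotOp::Compute above reshapes the indices to a {prefix, suffix} matrix and writes a {prefix, depth, suffix} output, where prefix is the product of the dimensions before the normalized axis and suffix the product of those after it. A plain-vector sketch of that decomposition, ignoring Eigen and the device functor:

#include <cstdint>
#include <iostream>
#include <vector>

// One-hot over flat indices using the {prefix, depth, suffix} layout from
// OneHotOp::Compute above: prefix is the product of dimensions before the
// normalized axis, suffix the product of those after it.
std::vector<float> OneHot(const std::vector<int64_t>& indices, int64_t prefix,
                          int64_t depth, float on, float off) {
  const int64_t suffix = static_cast<int64_t>(indices.size()) / prefix;
  std::vector<float> out(prefix * depth * suffix, off);
  for (int64_t p = 0; p < prefix; ++p) {
    for (int64_t s = 0; s < suffix; ++s) {
      const int64_t idx = indices[p * suffix + s];
      if (idx >= 0 && idx < depth) {  // out-of-range indices stay all-off
        out[(p * depth + idx) * suffix + s] = on;
      }
    }
  }
  return out;
}

int main() {
  // indices shape {2}, depth 3, axis -1 => prefix 2, suffix 1.
  for (float v : OneHot({0, 2}, 2, 3, 1.0f, 0.0f)) std::cout << v << ' ';
  std::cout << '\n';  // prints: 1 0 0 0 0 1
}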
#include "tensorflow/core/kernels/data/cache_dataset_ops.h" #include <atomic> #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/status/status.h" #include "tensorflow/core/data/global_shuffle_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/data/serialization_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/dataset_options.pb.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/data/cache_ops.h" #include "tensorflow/core/kernels/data/iterator_ops.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/util/tensor_bundle/naming.h" #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h" namespace tensorflow { namespace data { constexpr const char* const CacheDatasetOp::kDatasetType; constexpr const char* const CacheDatasetOp::kInputDataset; constexpr const char* const CacheDatasetOp::kFileName; constexpr const char* const CacheDatasetOp::kOutputTypes; constexpr const char* const CacheDatasetOp::kOutputShapes; namespace { constexpr char kKeyStrFormat[] = "%%%zuzu_%%%zuzu"; constexpr char kPaddingSizeStrFormat[] = "%zu"; constexpr char kFileDatasetPrefix[] = "File"; constexpr char kMode[] = "Mode"; constexpr char kLockFileSuffix[] = ".lockfile"; constexpr char kIterationCompleted[] = "iteration_completed"; constexpr char kCurIndex[] = "cur_index"; constexpr char kShardId[] = "shard_id"; constexpr char kCreatedAt[] = "Created at"; constexpr char kMemoryDatasetPrefix[] = "Memory"; constexpr char kMemoryCache[] = "MemoryCache"; constexpr char kCacheCompleted[] = "cache_completed"; constexpr char kIndex[] = "index"; constexpr char kImpl[] = "Impl"; constexpr char kCacheDataset[] = "CacheDataset"; constexpr char kIncompleteCacheErrorMessage[] = "The calling iterator did not fully read the dataset being cached. In " "order to avoid unexpected truncation of the dataset, the partially cached " "contents of the dataset will be discarded. This can happen if you have " "an input pipeline similar to `dataset.cache().take(k).repeat()`. 
You " "should use `dataset.take(k).cache().repeat()` instead."; } class DatasetRandomAccessCache { public: explicit DatasetRandomAccessCache(const DatasetBase* dataset) : input_(dataset) {} Status Get(OpKernelContext* ctx, int64 index, std::vector<Tensor>* out_tensors) { if (!iter_resource_) { TF_ASSIGN_OR_RETURN(iter_resource_, GetIteratorResourceFromDataset(ctx, input_)); TF_RETURN_IF_ERROR(iter_resource_->SetIteratorFromDataset(ctx, input_)); } if (index >= cache_.size()) { TF_RETURN_IF_ERROR(ExtendTempCacheToIndex(index, ctx)); } *out_tensors = cache_.at(index); return absl::OkStatus(); } std::vector<std::vector<Tensor>> GetCacheData() { return cache_; } private: Status ExtendTempCacheToIndex(int64 index, OpKernelContext* ctx) { bool end_of_sequence; while (cache_.size() <= index) { std::vector<Tensor> out_tensors; TF_RETURN_IF_ERROR( iter_resource_->GetNext(ctx, &out_tensors, &end_of_sequence)); if (end_of_sequence) { return tensorflow::errors::OutOfRange("Index out of range [0, ", cache_.size(), "):", index); } cache_.push_back(out_tensors); } return absl::OkStatus(); } absl::StatusOr<core::RefCountPtr<IteratorResource>> GetIteratorResourceFromDataset(OpKernelContext* ctx, const DatasetBase* dataset) { FunctionLibraryRuntime* flr; std::unique_ptr<DeviceMgr> device_mgr(nullptr); std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr); std::unique_ptr<ProcessFunctionLibraryRuntime> plfr(nullptr); TF_RETURN_IF_ERROR( ctx->function_library()->Clone(&flib_def, &plfr, &flr, true)); core::RefCountPtr<IteratorResource> iter_resource(new IteratorResource( ctx->env(), dataset->output_dtypes(), dataset->output_shapes(), std::move(device_mgr), std::move(flib_def), std::move(plfr), flr)); return iter_resource; } const DatasetBase* input_; core::RefCountPtr<IteratorResource> iter_resource_; std::vector<std::vector<Tensor>> cache_; }; class IteratorRandomAccessCache { public: explicit IteratorRandomAccessCache(const DatasetBase* input) : input_(input) {} absl::Status Get(AnyContext ctx, size_t element_position, std::vector<Tensor>* out_tensors) { if (element_position < cache_.size() && !cache_[element_position].empty()) { *out_tensors = cache_[element_position]; return absl::OkStatus(); } TF_RETURN_IF_ERROR(input_->Get(ctx, element_position, out_tensors)); if (element_position >= cache_.size()) { cache_.resize(element_position + 1); } cache_[element_position] = *out_tensors; return absl::OkStatus(); } private: const DatasetBase* input_ = nullptr; std::vector<std::vector<Tensor>> cache_; }; class CacheDatasetOp::FileDatasetBase : public DatasetBase { public: FileDatasetBase(OpKernelContext* ctx, const DatasetBase* input, string filename, Env* env) : DatasetBase(DatasetContext(ctx)), input_(input), filename_(std::move(filename)), env_(env), num_tensors_(input->output_dtypes().size()), tensor_index_padding_size_(StringPaddingSize(num_tensors_)), item_index_padding_size_(StringPaddingSize(kMaxItems)), tensor_format_string_(strings::Printf(kKeyStrFormat, item_index_padding_size_, tensor_index_padding_size_)) { input_->Ref(); DCHECK_EQ(item_index_padding_size_, 7); } ~FileDatasetBase() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { name_utils::IteratorPrefixParams params; params.dataset_prefix = kFileDatasetPrefix; return std::make_unique<FileIterator>(FileIterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix, params)}); } const DataTypeVector& output_dtypes() const override { return input_->output_dtypes(); 
} const std::vector<PartialTensorShape>& output_shapes() const override { return input_->output_shapes(); } string DebugString() const override { name_utils::DatasetDebugStringParams params; params.dataset_prefix = kFileDatasetPrefix; return name_utils::DatasetDebugString(kDatasetType, params); } int64_t CardinalityInternal(CardinalityOptions options) const override { return input_->Cardinality(options); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { return input_->CheckExternalState(); } protected: const DatasetBase* const input_; const tstring filename_; private: static size_t StringPaddingSize(size_t num_tensors) { return strings::Printf(kPaddingSizeStrFormat, num_tensors - 1).size(); } string FormatName(size_t item_index, size_t tensor_index) const { return strings::Printf(tensor_format_string_.c_str(), item_index, tensor_index); } class FileIterator : public DatasetIterator<FileDatasetBase> { public: explicit FileIterator(const Params& params) : DatasetIterator<FileDatasetBase>(params) { if (params.dataset->env_ ->FileExists(MetaFilename(params.dataset->filename_)) .ok()) { mode_ = Mode::read; } else { mode_ = Mode::write; } } Status Initialize(IteratorContext* ctx) override { mutex_lock l(mu_); return InitializeIterator(ctx); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { mutex_lock l(mu_); return iterator_->GetNext(ctx, out_tensors, end_of_sequence); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeKnownRatioNode(std::move(args), 1); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kMode, mode_)); return SaveInput(ctx, writer, iterator_); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(mu_); { int64_t temp; TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kMode, &temp)); mode_ = static_cast<Mode>(temp); } if (mode_ == Mode::write && dataset() ->env_->FileExists(MetaFilename(dataset()->filename_)) .ok()) { LOG(WARNING) << "It looks like the cache was already completely written(" << MetaFilename(dataset()->filename_) << ") after the last checkpoint was saved. Attempting to read " << "the cache instead of continuing to write. 
If this is a " << "mistake, please remove the above file and try running again."; mode_ = Mode::read; } TF_RETURN_IF_ERROR(InitializeIterator(ctx)); return RestoreInput(ctx, reader, iterator_); } private: class FileWriterIterator : public DatasetIterator<FileDatasetBase> { public: explicit FileWriterIterator(const Params& params) : DatasetIterator<FileDatasetBase>(params), cur_index_(0), shard_id_(0), filename_( strings::StrCat(params.dataset->filename_, "_", shard_id_)), lockfile_(strings::StrCat(filename_, kLockFileSuffix)), lockfile_created_(false), iteration_completed_(false) {} ~FileWriterIterator() override { if (!dataset()->env_->FileExists(MetaFilename(filename_)).ok()) { LOG(WARNING) << kIncompleteCacheErrorMessage; std::vector<string> cache_files; Status s = dataset()->env_->GetMatchingPaths( strings::StrCat(filename_, "*"), &cache_files); if (!s.ok()) { LOG(WARNING) << "Failed to get matching files on " << filename_ << "* : " << s.ToString(); } for (const string& path : cache_files) { s = dataset()->env_->DeleteFile(path); if (!s.ok()) { LOG(WARNING) << "Failed to delete " << path << " : " << s.ToString(); } } } } Status Initialize(IteratorContext* ctx) override { return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { mutex_lock l(mu_); *end_of_sequence = false; TF_RETURN_IF_ERROR(EnsureLockFileExists(end_of_sequence)); if (*end_of_sequence) { return absl::OkStatus(); } TF_RETURN_IF_ERROR(writer_->status()); if (cur_index_ >= kMaxItems) { Status s = Finish(); if (!s.ok()) { LOG(ERROR) << s; } return errors::InvalidArgument( "Upstream iterator is producing more than ", kMaxItems, " items, which is more than the cache limit."); } TF_RETURN_IF_ERROR( input_impl_->GetNext(ctx, out_tensors, end_of_sequence)); if (*end_of_sequence && out_tensors->empty()) { TF_RETURN_IF_ERROR(Finish()); cur_index_++; return absl::OkStatus(); } if (out_tensors->size() != dataset()->num_tensors_) { return errors::Internal( "Upstream iterator returned invalid number of tensors. 
" "Expected ", dataset()->num_tensors_, " got: ", out_tensors->size()); } size_t tensor_index = 0; for (const Tensor& t : *out_tensors) { DCHECK_LT(tensor_index, dataset()->num_tensors_); string key = dataset()->FormatName(cur_index_, tensor_index++); TF_RETURN_IF_ERROR(writer_->Add(key, t)); } if (*end_of_sequence) { TF_RETURN_IF_ERROR(Finish()); } cur_index_++; return absl::OkStatus(); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeKnownRatioNode(std::move(args), 1); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); TF_RETURN_IF_ERROR( writer->WriteScalar(prefix(), kCurIndex, cur_index_)); if (iteration_completed_) { TF_RETURN_IF_ERROR( writer->WriteScalar(prefix(), kIterationCompleted, "")); return absl::OkStatus(); } if (lockfile_created_) { TF_RETURN_IF_ERROR(writer_->Finish()); shard_id_++; filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_); lockfile_ = strings::StrCat(filename_, kLockFileSuffix); lockfile_created_ = false; } TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kShardId, shard_id_)); return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(mu_); int64_t temp; { TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &temp)); cur_index_ = static_cast<size_t>(temp); if (cur_index_ != temp) { return errors::Internal("Invalid value for cur_index ", temp); } } if (reader->Contains(prefix(), kIterationCompleted)) { iteration_completed_ = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); { TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kShardId, &temp)); shard_id_ = static_cast<size_t>(temp); if (shard_id_ != temp) { return errors::Internal("Invalid value for shard_id ", temp); } } filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_); lockfile_ = strings::StrCat(filename_, kLockFileSuffix); writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_); return absl::OkStatus(); } private: Status EnsureLockFileExists(bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (iteration_completed_) { *end_of_sequence = true; return absl::OkStatus(); } if (lockfile_created_) { return absl::OkStatus(); } if (dataset()->env_->FileExists(MetaFilename(filename_)).ok()) { return errors::AlreadyExists("Existing cache files found: \n", MetaFilename(filename_), "\n", DataFilename(filename_, 0, 1), "\n", "To continue delete the above files."); } if (dataset()->env_->FileExists(lockfile_).ok()) { char contents_scratch[151] = {0}; StringPiece contents; std::unique_ptr<RandomAccessFile> file; if (dataset()->env_->NewRandomAccessFile(lockfile_, &file).ok()) { file->Read(0, 150, &contents, contents_scratch).IgnoreError(); } return errors::AlreadyExists( "There appears to be a concurrent caching iterator running - " "cache lockfile already exists ('", lockfile_, "'). If you are sure no other running TF computations are " "using this cache prefix, delete the lockfile and " "re-initialize the iterator. 
Lockfile contents: ", contents); } std::unique_ptr<WritableFile> lockfile; TF_RETURN_IF_ERROR( dataset()->env_->NewWritableFile(lockfile_, &lockfile)); TF_RETURN_IF_ERROR(lockfile->Append( strings::StrCat(kCreatedAt, ": ", EnvTime::NowSeconds()))); writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_); lockfile_created_ = true; return absl::OkStatus(); } Status Finish() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { iteration_completed_ = true; TF_RETURN_IF_ERROR(writer_->Finish()); { std::vector<tstring> prefixes; prefixes.reserve(shard_id_ + 1); for (size_t i = 0; i <= shard_id_; ++i) { prefixes.emplace_back( strings::StrCat(dataset()->filename_, "_", i)); } TF_RETURN_IF_ERROR( MergeBundles(dataset()->env_, prefixes, dataset()->filename_)); } for (size_t i = 0; i <= shard_id_; ++i) { TF_RETURN_IF_ERROR(dataset()->env_->DeleteFile( strings::StrCat(dataset()->filename_, "_", i, kLockFileSuffix))); } return absl::OkStatus(); } mutex mu_; size_t cur_index_ TF_GUARDED_BY(mu_); size_t shard_id_ TF_GUARDED_BY(mu_); std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_); string filename_; std::unique_ptr<BundleWriter> writer_ TF_GUARDED_BY(mu_); string lockfile_ TF_GUARDED_BY(mu_); bool lockfile_created_ TF_GUARDED_BY(mu_); bool iteration_completed_ TF_GUARDED_BY(mu_); }; class FileReaderIterator : public DatasetIterator<FileDatasetBase> { public: explicit FileReaderIterator(const Params& params) : DatasetIterator<FileDatasetBase>(params), cur_index_(0), reader_(dataset()->env_, dataset()->filename_), iterator_restored_(false) {} Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { mutex_lock l(mu_); *end_of_sequence = false; TF_RETURN_IF_ERROR(reader_.status()); if (!reader_.Valid()) { *end_of_sequence = true; return absl::OkStatus(); } out_tensors->clear(); out_tensors->resize(dataset()->num_tensors_); for (size_t i = 0; i < dataset()->num_tensors_; ++i) { if (!iterator_restored_) { reader_.Next(); } else { iterator_restored_ = false; } if (!reader_.Valid()) { out_tensors->clear(); *end_of_sequence = true; return absl::OkStatus(); } StringPiece key = reader_.key(); DCHECK_EQ(key, dataset()->FormatName(cur_index_, i)); TF_RETURN_IF_ERROR(reader_.ReadCurrent(&(*out_tensors)[i])); TF_RETURN_IF_ERROR(reader_.status()); } cur_index_++; return absl::OkStatus(); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeKnownRatioNode(std::move(args), 1); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); TF_RETURN_IF_ERROR( writer->WriteScalar(prefix(), kCurIndex, cur_index_)); return absl::OkStatus(); } Status RestoreInternal( IteratorContext* ctx, IteratorStateReader* iterator_state_reader) override { mutex_lock l(mu_); { int64_t temp; TF_RETURN_IF_ERROR( iterator_state_reader->ReadScalar(prefix(), kCurIndex, &temp)); cur_index_ = static_cast<size_t>(temp); if (cur_index_ != temp) { return errors::Internal("Invalid value for cur_index ", temp); } } if (!reader_.Valid()) { return errors::Internal("Error initializing BundleReader."); } reader_.Seek(dataset()->FormatName(cur_index_, 0)); iterator_restored_ = true; return absl::OkStatus(); } private: mutex mu_; size_t cur_index_ TF_GUARDED_BY(mu_); BundleReader reader_ TF_GUARDED_BY(mu_); bool iterator_restored_ TF_GUARDED_BY(mu_); }; Status InitializeIterator(IteratorContext* ctx) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { switch (mode_) { case Mode::read: 
iterator_ = std::make_unique<FileReaderIterator>(FileReaderIterator::Params{ dataset(), strings::StrCat(prefix(), kImpl)}); break; case Mode::write: iterator_ = std::make_unique<FileWriterIterator>(FileWriterIterator::Params{ dataset(), strings::StrCat(prefix(), kImpl)}); } TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this)); return iterator_->Initialize(ctx); } mutex mu_; enum Mode { read, write }; Mode mode_ TF_GUARDED_BY(mu_); std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_); }; Env* const env_; const size_t num_tensors_; const size_t tensor_index_padding_size_; static constexpr size_t kMaxItems = 10000000; const size_t item_index_padding_size_; const string tensor_format_string_; }; class CacheDatasetOp::FileDataset : public CacheDatasetOp::FileDatasetBase { public: using FileDatasetBase::FileDatasetBase; protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph)); Node* filename = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename)); TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph, filename}, output)); return absl::OkStatus(); } }; class CacheDatasetOp::FileDatasetV2 : public CacheDatasetOp::FileDatasetBase { public: explicit FileDatasetV2(OpKernelContext* ctx, const DatasetBase* input, string filename, Env* env, const Tensor& resource_handle) : FileDatasetBase(ctx, input, filename, env), resource_handle_(resource_handle) {} protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node)); Node* filename_node = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename_node)); Node* resource_handle_node = nullptr; TF_RETURN_IF_ERROR(b->AddTensor(resource_handle_, &resource_handle_node)); TF_RETURN_IF_ERROR(b->AddDataset( this, {input_node, filename_node, resource_handle_node}, output)); return absl::OkStatus(); } private: const Tensor resource_handle_; }; class CacheDatasetOp::MemoryDatasetBase : public DatasetBase { public: explicit MemoryDatasetBase(OpKernelContext* ctx, const DatasetBase* input, std::shared_ptr<MemoryCache> cache) : DatasetBase(DatasetContext(ctx)), input_(input), cache_(std::move(cache)) { input_->Ref(); random_indexing_compatible_ = input_->RandomIndexingCompatible(); } ~MemoryDatasetBase() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { name_utils::IteratorPrefixParams params; params.dataset_prefix = kMemoryDatasetPrefix; return std::make_unique<MemoryIterator>( MemoryIterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix, params)}, cache_.get()); } const DataTypeVector& output_dtypes() const override { return input_->output_dtypes(); } const std::vector<PartialTensorShape>& output_shapes() const override { return input_->output_shapes(); } string DebugString() const override { name_utils::DatasetDebugStringParams params; params.dataset_prefix = kMemoryDatasetPrefix; return name_utils::DatasetDebugString(kDatasetType, params); } int64_t CardinalityInternal(CardinalityOptions options) const override { return input_->Cardinality(options); }; Status Get(OpKernelContext* ctx, int64 index, std::vector<Tensor>* out_tensors) const override { mutex_lock l(mu_); CardinalityOptions options; 
options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_LOW); int64_t cardinality = Cardinality(options); if (cardinality != kUnknownCardinality && cardinality != kInfiniteCardinality && index >= cardinality) { return errors::OutOfRange("Index out of range [0, ", cardinality, "):", index); } if (!dataset_random_access_cache_) { dataset_random_access_cache_ = std::make_unique<DatasetRandomAccessCache>(input_); } return dataset_random_access_cache_->Get(ctx, index, out_tensors); } Status Get(AnyContext ctx, int64 index, std::vector<Tensor>* out_tensors) const override { mutex_lock l(mu_); if (!iterator_random_access_cache_) { iterator_random_access_cache_ = std::make_unique<IteratorRandomAccessCache>(input_); } return iterator_random_access_cache_->Get(ctx, index, out_tensors); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { return input_->CheckExternalState(); } absl::Status RandomIndexingCompatible() con
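The file cache above builds its bundle keys in two stages: kKeyStrFormat ("%%%zuzu_%%%zuzu") is itself a format string that strings::Printf expands into a width-padded format such as "%7zu_%1zu" (kMaxItems = 10000000 yields the item padding of 7 checked by the DCHECK in the constructor), and FormatName then applies that format to (item_index, tensor_index). The same two-stage formatting with plain snprintf, as a sketch:

#include <cstdio>
#include <string>

// Two-stage key formatting, mirroring FileDatasetBase::FormatName above.
// Stage 1 expands "%%%zuzu_%%%zuzu" into a concrete format ("%7zu_%1zu" for
// paddings 7 and 1); stage 2 applies it to (item_index, tensor_index).
// Compilers may warn about the non-literal format used in stage 2.
std::string FormatKey(size_t item_pad, size_t tensor_pad, size_t item_index,
                      size_t tensor_index) {
  char fmt[32];
  std::snprintf(fmt, sizeof(fmt), "%%%zuzu_%%%zuzu", item_pad, tensor_pad);
  char key[64];
  std::snprintf(key, sizeof(key), fmt, item_index, tensor_index);
  return key;
}

int main() {
  // kMaxItems = 10000000 => item padding 7 (the DCHECK in the file above).
  std::printf("[%s]\n", FormatKey(7, 1, 42, 0).c_str());  // [     42_0]
}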
#include "tensorflow/core/kernels/data/cache_dataset_ops.h" #include <string> #include <utility> #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/serialization_utils.h" #include "tensorflow/core/platform/path.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "cache_dataset"; constexpr char kFileDatasetPrefix[] = "File"; constexpr char kMemoryDatasetPrefix[] = "Memory"; class CacheDatasetParams : public DatasetParams { public: template <typename T> CacheDatasetParams(T input_dataset_params, string filename, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), filename_(filename) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { Tensor filename_tensor = CreateTensor<tstring>(TensorShape({}), {filename_}); return {filename_tensor}; } Status GetInputNames(std::vector<string>* input_names) const override { *input_names = {CacheDatasetOp::kInputDataset, CacheDatasetOp::kFileName}; return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { *attr_vector = {{"output_types", output_dtypes_}, {"output_shapes", output_shapes_}, {"metadata", ""}}; return absl::OkStatus(); } string dataset_type() const override { return CacheDatasetOp::kDatasetType; } string filename() const { return filename_; } private: string filename_; }; class CacheDatasetOpTest : public DatasetOpsTestBase { public: Status Initialize(const DatasetParams& dataset_params) { TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params)); auto params = static_cast<const CacheDatasetParams&>(dataset_params); cache_filename_ = params.filename(); return absl::OkStatus(); } ~CacheDatasetOpTest() override { if (!cache_filename_.empty()) { std::vector<string> cache_files; Status s = device_->env()->GetMatchingPaths( strings::StrCat(cache_filename_, "*"), &cache_files); if (!s.ok()) { LOG(WARNING) << "Failed to get matching files on " << cache_filename_ << "* : " << s.ToString(); } for (const string& path : cache_files) { s = device_->env()->DeleteFile(path); if (!s.ok()) { LOG(WARNING) << "Failed to delete " << path << " : " << s.ToString(); } } } } protected: tstring cache_filename_; }; CacheDatasetParams CacheDatasetParams1() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{3, 3, 1}, {0, 1, 2, 3, 4, 5, 6, 7, 8})}, "tensor_slice"); return CacheDatasetParams( std::move(tensor_slice_dataset_params), io::JoinPath(testing::TmpDir(), "cache_data"), {DT_INT64}, {PartialTensorShape({3, 1})}, kNodeName); } CacheDatasetParams CacheDatasetParams2() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{0}, {})}, "tensor_slice"); return CacheDatasetParams( std::move(tensor_slice_dataset_params), io::JoinPath(testing::TmpDir(), "cache_data"), {DT_INT64}, {PartialTensorShape({})}, kNodeName); } CacheDatasetParams CacheDatasetParams3() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{3, 3, 1}, {0, 1, 2, 3, 4, 5, 6, 7, 8})}, "tensor_slice"); return CacheDatasetParams(std::move(tensor_slice_dataset_params), "", {DT_INT64}, 
{PartialTensorShape({3, 1})}, kNodeName); } CacheDatasetParams CacheDatasetParams4() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{0}, {})}, "tensor_slice"); return CacheDatasetParams(std::move(tensor_slice_dataset_params), "", {DT_INT64}, {PartialTensorShape({})}, kNodeName); } std::vector<GetNextTestCase<CacheDatasetParams>> GetNextTestCases() { return {{CacheDatasetParams1(), CreateTensors<int64_t>(TensorShape({3, 1}), {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})}, {CacheDatasetParams2(), {}}, {CacheDatasetParams3(), CreateTensors<int64_t>(TensorShape({3, 1}), {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})}, {CacheDatasetParams4(), {}}}; } class ParameterizedGetNextTest : public CacheDatasetOpTest, public ::testing::WithParamInterface< GetNextTestCase<CacheDatasetParams>> {}; TEST_P(ParameterizedGetNextTest, GetNext) { auto test_case = GetParam(); TF_ASSERT_OK(Initialize(test_case.dataset_params)); bool end_of_sequence = false; std::vector<Tensor> out_tensors; while (!end_of_sequence) { std::vector<Tensor> next; TF_EXPECT_OK( iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence)); out_tensors.insert(out_tensors.end(), next.begin(), next.end()); } TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs, true)); TF_ASSERT_OK(dataset_->MakeIterator( iterator_ctx_.get(), nullptr, test_case.dataset_params.iterator_prefix(), &iterator_)); end_of_sequence = false; out_tensors.clear(); while (!end_of_sequence) { std::vector<Tensor> next; TF_EXPECT_OK( iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence)); out_tensors.insert(out_tensors.end(), next.begin(), next.end()); } TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs, true)); } INSTANTIATE_TEST_SUITE_P(CacheDatasetOpTest, ParameterizedGetNextTest, ::testing::ValuesIn(GetNextTestCases())); TEST_F(CacheDatasetOpTest, DatasetNodeName) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(CacheDatasetOpTest, DatasetTypeString) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK( CheckDatasetTypeString(name_utils::OpName(CacheDatasetOp::kDatasetType))); } TEST_F(CacheDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64})); } std::vector<DatasetOutputShapesTestCase<CacheDatasetParams>> DatasetOutputShapesTestCases() { return {{CacheDatasetParams1(), {PartialTensorShape({3, 1})}}, {CacheDatasetParams2(), {PartialTensorShape({})}}, {CacheDatasetParams3(), {PartialTensorShape({3, 1})}}, {CacheDatasetParams4(), {PartialTensorShape({})}}}; } DATASET_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams, DatasetOutputShapesTestCases()) std::vector<CardinalityTestCase<CacheDatasetParams>> CardinalityTestCases() { return {{CacheDatasetParams1(), 3}, {CacheDatasetParams2(), 0}, {CacheDatasetParams3(), 3}, {CacheDatasetParams4(), 0}}; } DATASET_CARDINALITY_TEST_P(CacheDatasetOpTest, CacheDatasetParams, CardinalityTestCases()) TEST_F(CacheDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64})); } std::vector<IteratorOutputShapesTestCase<CacheDatasetParams>> IteratorOutputShapesTestCases() { return {{CacheDatasetParams1(), {PartialTensorShape({3, 1})}}, {CacheDatasetParams2(), 
{PartialTensorShape({})}}, {CacheDatasetParams3(), {PartialTensorShape({3, 1})}}, {CacheDatasetParams4(), {PartialTensorShape({})}}}; } ITERATOR_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams, IteratorOutputShapesTestCases()) TEST_F(CacheDatasetOpTest, IteratorPrefix) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); name_utils::IteratorPrefixParams iterator_prefix_params; iterator_prefix_params.dataset_prefix = cache_filename_.empty() ? kMemoryDatasetPrefix : kFileDatasetPrefix; TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( CacheDatasetOp::kDatasetType, dataset_params.iterator_prefix(), iterator_prefix_params))); } std::vector<IteratorSaveAndRestoreTestCase<CacheDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{CacheDatasetParams1(), {0, 2, 4, 11}, CreateTensors<int64_t>(TensorShape({3, 1}), {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})}, {CacheDatasetParams2(), {0, 2, 4, 11}, {}}, {CacheDatasetParams3(), {0, 2, 4, 11}, CreateTensors<int64_t>(TensorShape({3, 1}), {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})}, {CacheDatasetParams4(), {0, 2, 4, 11}, {}}}; } class ParameterizedIteratorSaveAndRestoreTest : public CacheDatasetOpTest, public ::testing::WithParamInterface< IteratorSaveAndRestoreTestCase<CacheDatasetParams>> {}; TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) { auto test_case = GetParam(); TF_ASSERT_OK(Initialize(test_case.dataset_params)); bool end_of_sequence = false; std::vector<Tensor> out_tensors; if (cache_filename_.empty()) { while (!end_of_sequence) { TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)); } end_of_sequence = false; out_tensors.clear(); TF_ASSERT_OK(dataset_->MakeIterator( iterator_ctx_.get(), nullptr, test_case.dataset_params.iterator_prefix(), &iterator_)); } std::unique_ptr<SerializationContext> serialization_ctx; TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx)); int cur_iteration = 0; auto expected_outputs_it = test_case.expected_outputs.begin(); for (int breakpoint : test_case.breakpoints) { VariantTensorDataWriter writer; TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer)); std::vector<const VariantTensorData*> data; writer.GetData(&data); VariantTensorDataReader reader(data); TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, test_case.dataset_params.iterator_prefix(), *dataset_, &iterator_)); while (cur_iteration <= breakpoint) { out_tensors.clear(); TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)); if (!end_of_sequence) { EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end()); TF_EXPECT_OK(ExpectEqual(out_tensors.back(), *expected_outputs_it)); expected_outputs_it++; } cur_iteration++; } if (breakpoint >= dataset_->Cardinality()) { EXPECT_TRUE(end_of_sequence); EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end()); } else { EXPECT_FALSE(end_of_sequence); } } } INSTANTIATE_TEST_CASE_P(CacheDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest, ::testing::ValuesIn(IteratorSaveAndRestoreTestCases())); } } }
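The SaveAndRestore test above checkpoints the iterator at each breakpoint with VariantTensorDataWriter and rebuilds it with RestoreIterator before consuming further elements. The essence of that round trip, reduced to an iterator whose only state is a cursor (a sketch; the real serialization goes through IteratorStateWriter/Reader):

#include <iostream>
#include <map>
#include <string>

// Checkpoint is a stand-in for the writer/reader pair in the test above.
struct Checkpoint {
  std::map<std::string, long> scalars;
};

// An iterator whose entire state is its cursor, like FileReaderIterator's
// kCurIndex scalar.
class CountingIterator {
 public:
  explicit CountingIterator(long limit) : limit_(limit) {}
  bool GetNext(long* out) {
    if (cur_ >= limit_) return false;
    *out = cur_++;
    return true;
  }
  void Save(Checkpoint* ckpt) const { ckpt->scalars["cur_index"] = cur_; }
  void Restore(const Checkpoint& ckpt) { cur_ = ckpt.scalars.at("cur_index"); }
 private:
  long cur_ = 0;
  long limit_;
};

int main() {
  CountingIterator it(5);
  long v;
  it.GetNext(&v);
  it.GetNext(&v);  // consumed 0 and 1

  Checkpoint ckpt;
  it.Save(&ckpt);  // breakpoint: cur_index == 2

  CountingIterator restored(5);
  restored.Restore(ckpt);  // a fresh iterator resumes where the saved one stood
  while (restored.GetNext(&v)) std::cout << v << ' ';  // prints: 2 3 4
  std::cout << '\n';
}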
Status Get(AnyContext ctx, int64 index, std::vector<Tensor>* out_tensors) const override { mutex_lock l(mu_); if (!iterator_random_access_cache_) { iterator_random_access_cache_ = std::make_unique<IteratorRandomAccessCache>(input_); } return iterator_random_access_cache_->Get(ctx, index, out_tensors); }
DATASET_CARDINALITY_TEST_P(CacheDatasetOpTest, CacheDatasetParams, CardinalityTestCases()) TEST_F(CacheDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = CacheDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64})); }
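IteratorRandomAccessCache::Get above is a grow-on-demand memo: serve the element if its slot is populated, otherwise delegate to the input, resize the cache, and remember the result. A generic sketch of that pattern, using std::optional to mark populated slots where the original checks for a non-empty tensor vector, and a callback in place of input_->Get:

#include <cstddef>
#include <functional>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

// Grow-on-demand memo cache, mirroring IteratorRandomAccessCache::Get above.
template <typename T>
class RandomAccessCache {
 public:
  explicit RandomAccessCache(std::function<T(size_t)> fetch)
      : fetch_(std::move(fetch)) {}

  const T& Get(size_t pos) {
    if (pos < cache_.size() && cache_[pos].has_value()) {
      return *cache_[pos];  // already cached
    }
    T value = fetch_(pos);  // miss: delegate to the underlying input
    if (pos >= cache_.size()) cache_.resize(pos + 1);
    cache_[pos] = std::move(value);
    return *cache_[pos];
  }

 private:
  std::function<T(size_t)> fetch_;
  std::vector<std::optional<T>> cache_;
};

int main() {
  int fetches = 0;
  RandomAccessCache<int> cache([&](size_t i) {
    ++fetches;
    return static_cast<int>(i) * 10;
  });
  cache.Get(3);
  cache.Get(3);  // served from the cache
  cache.Get(1);  // slot existed after the resize but was never populated
  std::cout << "fetches=" << fetches << "\n";  // fetches=2
}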
#include "tensorflow/core/common_runtime/graph_view.h" #include <atomic> #include <deque> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/edgeset.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { string NodeItem::DebugString() const { string ret = strings::StrCat("{name:'", kernel->name(), "' id:", node_id); if (is_source) { strings::StrAppend(&ret, " source}"); } else { strings::StrAppend(&ret, " def:{", SummarizeNodeDef(kernel->def()), "}}"); } return ret; } GraphView::~GraphView() { static_assert(std::is_trivially_destructible<AllocatorAttributes>::value, "Update code if AllocatorAttributes gains a destructor"); static_assert(std::is_trivially_destructible<EdgeInfo>::value, "Update code if EdgeInfo gains a destructor"); for (int i = 0; i < num_nodes_; i++) { NodeItem* n = node(i); if (n != nullptr) { n->NodeItem::~NodeItem(); } } delete[] node_offsets_; delete[] space_; } namespace { typedef std::tuple<int32, int32> OutputAndControlEdges; OutputAndControlEdges CountOutputEdges(const Node* n) { DCHECK_LE(n->out_edges().size(), kint32max); int32_t num_output_edges = 0; int32_t num_output_control_edges = 0; for (auto e : n->out_edges()) { if (IsSink(e->dst())) continue; if (e->IsControlEdge()) { ++num_output_control_edges; } else { ++num_output_edges; } } return OutputAndControlEdges(num_output_edges, num_output_control_edges); } } size_t GraphView::NodeItemBytes(const Node* n) { int32_t num_output_edges; int32_t num_output_control_edges; std::tie(num_output_edges, num_output_control_edges) = CountOutputEdges(n); const int num_inputs = n->num_inputs(); const int num_outputs = n->num_outputs(); const size_t raw_bytes = sizeof(NodeItem) + num_output_edges * sizeof(EdgeInfo) + num_output_control_edges * sizeof(ControlEdgeInfo) + num_outputs * sizeof(AllocatorAttributes) + num_outputs * sizeof(int) + num_inputs * sizeof(uint8) + num_outputs * sizeof(uint8); static constexpr size_t kItemAlignment = sizeof(NodeItem*); static_assert(kItemAlignment % alignof(NodeItem) == 0, "NodeItem must be aligned with kItemAlignment"); static_assert(kItemAlignment % alignof(EdgeInfo) == 0, "EdgeInfo must be aligned with kItemAlignment"); static_assert(kItemAlignment % alignof(ControlEdgeInfo) == 0, "ControlEdgeInfo must be aligned with kItemAlignment"); static_assert(kItemAlignment % alignof(AllocatorAttributes) == 0, "AllocatorAttributes must be aligned with kItemAlignment"); static_assert(sizeof(NodeItem) % alignof(EdgeInfo) == 0, "NodeItem must be aligned with EdgeInfo"); static_assert(sizeof(NodeItem) % alignof(AllocatorAttributes) == 0, "NodeItem must be aligned with AllocatorAttributes"); static_assert(sizeof(EdgeInfo) % alignof(AllocatorAttributes) == 0, "EdgeInfo must be aligned with AllocatorAttributes"); const size_t bytes = ((raw_bytes + kItemAlignment - 1) / kItemAlignment) * kItemAlignment; return bytes; } char* GraphView::InitializeNode(char* ptr, const Node* n) { const int id = n->id(); CHECK(node_offsets_[id] == kuint32max); const size_t 
bytes = NodeItemBytes(n); constexpr size_t kItemAlignment = sizeof(NodeItem*); CHECK_EQ(reinterpret_cast<uintptr_t>(ptr) % kItemAlignment, 0); NodeItem* item = reinterpret_cast<NodeItem*>(ptr); CHECK_LE(static_cast<int64_t>(ptr - space_), kuint32max); const uint32 offset = static_cast<uint32>(ptr - space_); node_offsets_[id] = offset; ptr += bytes; int32_t num_output_edges; int32_t num_output_control_edges; std::tie(num_output_edges, num_output_control_edges) = CountOutputEdges(n); const int num_inputs = n->num_inputs(); const int num_outputs = n->num_outputs(); new (item) NodeItem(); item->num_inputs = num_inputs; item->num_outputs = num_outputs; item->num_output_edges = num_output_edges; item->num_output_control_edges = num_output_control_edges; gtl::InlinedVector<EdgeInfo*, 4> last_indices(num_outputs, nullptr); EdgeInfo* dst_edge = item->output_edge_base(); for (auto e : n->out_edges()) { if (e->IsControlEdge()) continue; dst_edge->dst_id = e->dst()->id(); CHECK_LE(e->src_output(), 0x3FFFFFFF); dst_edge->output_slot = e->src_output(); dst_edge->is_last = false; const int output_slot = dst_edge->output_slot; if (output_slot >= 0) { last_indices[output_slot] = dst_edge; } dst_edge->input_slot = e->dst_input(); dst_edge++; } for (EdgeInfo* edge_info : last_indices) { if (edge_info != nullptr) { edge_info->is_last = true; } } ControlEdgeInfo* dst_control_edge = item->output_control_edge_base(); for (auto e : n->out_edges()) { if (!e->IsControlEdge() || IsSink(e->dst())) continue; dst_control_edge->dst_id = e->dst()->id(); dst_control_edge++; } AllocatorAttributes* output_attrs = item->output_attr_base(); for (int i = 0; i < num_outputs; i++) { new (&output_attrs[i]) AllocatorAttributes(); } DCHECK_LT(DataType_MAX, 255); uint8* input_types = item->input_type_base(); item->is_any_input_ref_typed = false; for (int i = 0; i < num_inputs; i++) { input_types[i] = static_cast<uint8>(n->input_type(i)); DCHECK_EQ(item->input_type(i), n->input_type(i)); item->is_any_input_ref_typed |= IsRefType(n->input_type(i)); } { std::vector<int> forward_input; Status fwd_status = GetNodeAttr(n->attrs(), "_forward_input", &forward_input); std::vector<int> scoped_allocator_attrs; Status sa_status = GetNodeAttr(n->attrs(), "_scoped_allocator", &scoped_allocator_attrs); int* forward_from = item->forward_from_base(); uint8* output_types = item->output_type_base(); for (int i = 0; i < num_outputs; ++i) { output_types[i] = static_cast<uint8>(n->output_type(i)); DCHECK_EQ(item->output_type(i), n->output_type(i)); forward_from[i] = OpKernelContext::Params::kNoReservation; if (sa_status.ok()) { for (int j = 0; j < scoped_allocator_attrs.size(); j += 2) { if (scoped_allocator_attrs[j] == i) { forward_from[i] = OpKernelContext::Params::kNeverForward; DCHECK_EQ(output_attrs[i].scope_id, 0); output_attrs[i].scope_id = scoped_allocator_attrs[j + 1]; } } } if (fwd_status.ok() && forward_from[i] == OpKernelContext::Params::kNoReservation) { DCHECK_EQ(forward_input.size() % 2, 0); for (int j = 0; j < forward_input.size(); j += 2) { if (forward_input[j + 1] == i) { DCHECK_EQ(forward_from[i], OpKernelContext::Params::kNoReservation); forward_from[i] = forward_input[j]; break; } } } } } return ptr; } Status GraphView::Initialize(const Graph* g) { CHECK(node_offsets_ == nullptr); const int num_nodes = g->num_node_ids(); num_nodes_ = num_nodes; size_t total_bytes = 0; for (const Node* n : g->nodes()) { if (n->out_edges().size() > kint32max) { return errors::InvalidArgument( "The executor cannot handle nodes with more than ", 
kint32max, " output edges. Node ", n->name(), " had ", n->out_edges().size(), " output edges."); } total_bytes += NodeItemBytes(n); } node_offsets_ = new uint32[num_nodes]; for (int i = 0; i < num_nodes; i++) { node_offsets_[i] = kuint32max; } space_ = new char[total_bytes]; char* ptr = space_; auto it = g->nodes(); if (OpOrderDeterminismRequired()) { std::vector<Node*> nodes(it.begin(), it.end()); std::sort(nodes.begin(), nodes.end(), NodeComparatorName()); for (const Node* n : nodes) { ptr = InitializeNode(ptr, n); } } else { for (const Node* n : it) { ptr = InitializeNode(ptr, n); } } CHECK_EQ(ptr, space_ + total_bytes); return absl::OkStatus(); } namespace { bool ExtractScopedAllocatorAttr(const std::vector<int>& sc_attr, int output_index, AllocatorAttributes* alloc_attr) { DCHECK_LE(2, sc_attr.size()); for (int i = 0; i < sc_attr.size(); i += 2) { if (sc_attr[i] == output_index) { CHECK_EQ(alloc_attr->scope_id, 0); alloc_attr->scope_id = sc_attr[i + 1]; return true; } } return false; } } void GraphView::SetScopedAllocatorAttrs( const std::vector<const Node*>& sa_nodes) { for (const Node* sa : sa_nodes) { NodeItem* sa_item = node(sa->id()); AllocatorAttributes* sa_attrs = sa_item->output_attr_base(); for (const auto& e : sa->out_edges()) { if (IsSink(e->dst()) || !e->IsControlEdge()) { continue; } Node* use_node = e->dst(); NodeItem* item = node(use_node->id()); AllocatorAttributes* use_attrs = item->output_attr_base(); std::vector<int> scoped_allocator_attrs; Status s = GetNodeAttr(use_node->attrs(), "_scoped_allocator", &scoped_allocator_attrs); if (!s.ok()) { VLOG(2) << "Failed to find expected ScopedAllocator attr on " << use_node->name(); continue; } for (const auto& e : use_node->out_edges()) { if (IsSink(e->dst()) || !e->IsControlEdge()) { AllocatorAttributes attr; if (ExtractScopedAllocatorAttr(scoped_allocator_attrs, e->src_output(), &attr)) { (use_attrs + e->src_output())->Merge(attr); attr = *(use_attrs + e->src_output()); attr.scope_id = 0; sa_attrs->Merge(attr); } } } } } } namespace { Status InferAllocAttr(const Node* n, const Node* dst, const DeviceNameUtils::ParsedName& local_dev_name, AllocatorAttributes* attr) { Status s; if (IsRecv(n)) { string src_name; s = GetNodeAttr(n->attrs(), "send_device", &src_name); if (!s.ok()) return s; DeviceNameUtils::ParsedName parsed_src_name; if (!DeviceNameUtils::ParseFullName(src_name, &parsed_src_name)) { s = errors::Internal("Bad send_device attr '", src_name, "' in node ", n->name()); return s; } if (!DeviceNameUtils::IsSameAddressSpace(parsed_src_name, local_dev_name)) { attr->set_nic_compatible(true); VLOG(2) << "node " << n->name() << " is the sink of an RPC in"; } else if ((local_dev_name.type == "CPU" || n->IsHostRecv()) && parsed_src_name.type != "CPU") { attr->set_gpu_compatible(true); VLOG(2) << "node " << n->name() << " is the sink of a gpu->cpu copy"; } else { VLOG(2) << "default alloc case local type " << local_dev_name.type << " remote type " << parsed_src_name.type; } } if (IsSend(dst)) { string dst_name; s = GetNodeAttr(dst->attrs(), "recv_device", &dst_name); if (!s.ok()) return s; DeviceNameUtils::ParsedName parsed_dst_name; if (!DeviceNameUtils::ParseFullName(dst_name, &parsed_dst_name)) { s = errors::Internal("Bad recv_device attr '", dst_name, "' in node ", n->name()); return s; } if (!DeviceNameUtils::IsSameAddressSpace(parsed_dst_name, local_dev_name)) { attr->set_nic_compatible(true); VLOG(2) << "node " << n->name() << " is the source of an RPC out"; } else if ((local_dev_name.type == "CPU" || 
dst->IsHostSend()) && parsed_dst_name.type != "CPU") { attr->set_gpu_compatible(true); VLOG(2) << "node " << n->name() << " is the source of a cpu->gpu copy"; } else { VLOG(2) << "default alloc case local type " << local_dev_name.type << " remote type " << parsed_dst_name.type; } } if (n->IsCollective()) { attr->set_nic_compatible(true); } return s; } } Status GraphView::SetAllocAttrs(const Graph* g, const Device* device) { Status s; const DeviceNameUtils::ParsedName& local_dev_name = device->parsed_name(); std::vector<const Node*> scoped_allocator_instances; for (const Node* n : g->nodes()) { NodeItem* item = node(n->id()); AllocatorAttributes* attrs = item->output_attr_base(); if (IsScopedAllocator(n)) { scoped_allocator_instances.push_back(n); } for (const auto& e : n->out_edges()) { if (!e->IsControlEdge()) { AllocatorAttributes attr; s = InferAllocAttr(n, e->dst(), local_dev_name, &attr); if (!s.ok()) return s; if (attr.value != 0 || attr.scope_id != 0) { attrs[e->src_output()].Merge(attr); } } } for (int out = 0; out < n->num_outputs(); out++) { const OpKernel* op_kernel = item->kernel; DCHECK_LT(out, op_kernel->output_memory_types().size()); bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY; if (on_host) { AllocatorAttributes h; h.set_on_host(on_host); attrs[out].Merge(h); } } } SetScopedAllocatorAttrs(scoped_allocator_instances); return s; } }
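The executor code above packs every NodeItem, together with its trailing EdgeInfo, allocator-attribute, and type arrays, into the single space_ buffer, and addresses nodes through uint32 offsets rather than pointers. A minimal sketch of that offset-indexed arena pattern follows; the Record layout and all names are illustrative stand-ins, not TensorFlow's API, and the real code additionally CHECKs that every record lands on a pointer-aligned boundary.

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Illustrative arena: variable-size records packed into one flat buffer,
// with an id -> uint32 offset table standing in for per-node pointers
// (4-byte handles instead of 8-byte pointers, and one allocation total).
struct Record {
  int num_inputs = 0;
  int num_outputs = 0;
  // Variable-length arrays (edges, attrs, types) would follow the fixed
  // header inside the same buffer, as NodeItem's *_base() accessors imply.
};

class Arena {
 public:
  explicit Arena(size_t total_bytes) : space_(total_bytes) {}

  Record* Allocate(int id, size_t bytes) {
    if (offsets_.size() <= static_cast<size_t>(id)) {
      offsets_.resize(id + 1, UINT32_MAX);  // UINT32_MAX == "unallocated"
    }
    offsets_[id] = static_cast<uint32_t>(next_);
    next_ += bytes;
    return new (space_.data() + offsets_[id]) Record();  // placement-new, as in InitializeNode
  }

  Record* node(int id) {
    return reinterpret_cast<Record*>(space_.data() + offsets_[id]);
  }

 private:
  std::vector<char> space_;        // one allocation for all records
  std::vector<uint32_t> offsets_;  // id -> byte offset into space_
  size_t next_ = 0;
};

int main() {
  Arena arena(2 * sizeof(Record) + 16);
  arena.Allocate(0, sizeof(Record) + 16)->num_inputs = 1;  // 16 trailing payload bytes
  arena.Allocate(1, sizeof(Record))->num_outputs = 2;
  return (arena.node(0)->num_inputs == 1 && arena.node(1)->num_outputs == 2) ? 0 : 1;
}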
#include "tensorflow/core/grappler/utils/graph_view.h" #include <type_traits> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/substitute.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/benchmark_testlib.h" #include "tensorflow/core/grappler/utils/grappler_test.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace grappler { namespace utils { namespace { using ::tensorflow::test::function::GDef; using ::tensorflow::test::function::NDef; constexpr char kNoOp[] = "NoOp"; GraphDef SimpleTestGraph() { return GDef({NDef("a", kNoOp, {"b:2", "d:3", "b:2", "d:3", "^c"}), NDef("b", kNoOp, {"d:2", "c:5", "^c"}), NDef("c", kNoOp, {"^d", "^d"}), NDef("d", kNoOp, {})}, {}); } template <typename T> const string GetGraphViewTypeAsString() { return std::is_same<T, class GraphView>::value ? "GraphView" : "MutableGraphView"; } using GraphViewTypes = ::testing::Types<GraphView, MutableGraphView>; template <typename T> class TypedGraphViewTest : public ::testing::Test {}; TYPED_TEST_SUITE(TypedGraphViewTest, GraphViewTypes); TYPED_TEST(TypedGraphViewTest, GraphWithDuplicateNodeNames) { GraphDef graph = GDef({NDef("a", kNoOp, {}), NDef("a", kNoOp, {})}, {}); Status s; TypeParam graph_view(&graph, &s); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.message(), absl::Substitute( "$0::$0 error: graph has multiple nodes with the name 'a'.", GetGraphViewTypeAsString<TypeParam>())); } TYPED_TEST(TypedGraphViewTest, GraphWithMissingFanins) { GraphDef graph = GDef({NDef("a", kNoOp, {"b:3"})}, {}); Status s; TypeParam graph_view(&graph, &s); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.message(), absl::Substitute("$0::$0 error: node 'a' has missing fanin 'b:3'.", GetGraphViewTypeAsString<TypeParam>())); } TYPED_TEST(TypedGraphViewTest, GraphWithSelfCycles) { GraphDef graph = GDef({NDef("a", kNoOp, {"a:4"})}, {}); Status s; TypeParam graph_view(&graph, &s); EXPECT_FALSE(s.ok()); EXPECT_EQ( s.message(), absl::Substitute("$0::$0 error: node 'a' has self cycle fanin 'a:4'.", GetGraphViewTypeAsString<TypeParam>())); } TYPED_TEST(TypedGraphViewTest, GraphWithMisorderedFanins) { GraphDef graph = GDef({NDef("a", kNoOp, {"^b", "b:4"}), NDef("b", kNoOp, {})}, {}); Status s; TypeParam graph_view(&graph, &s); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.message(), absl::Substitute("$0::$0 error: node 'a' has regular fanin 'b:4' " "after controlling fanins.", GetGraphViewTypeAsString<TypeParam>())); } TYPED_TEST(TypedGraphViewTest, GetNodeWithIndex) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); const int num_nodes = graph_view.NumNodes(); ASSERT_EQ(graph_view.NumNodes(), graph.node_size()); for (int i = 0; i < num_nodes; ++i) { const auto* node = graph_view.GetNode(i); ASSERT_NE(node, nullptr); EXPECT_EQ(node->node(), graph.mutable_node(i)); } const auto* bad_node = graph_view.GetNode(-1); ASSERT_EQ(bad_node, nullptr); bad_node = graph_view.GetNode(num_nodes); ASSERT_EQ(bad_node, nullptr); } TYPED_TEST(TypedGraphViewTest, GetNodeWithName) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); std::vector<string> node_names = {"a", "b", "c", "d"}; for (int i = 0; i < node_names.size(); ++i) { const string& node_name 
= node_names[i]; const auto* node = graph_view.GetNode(node_name); ASSERT_NE(node, nullptr); EXPECT_EQ(node->node(), graph.mutable_node(i)); } const auto* bad_node = graph_view.GetNode("e"); ASSERT_EQ(bad_node, nullptr); } TYPED_TEST(TypedGraphViewTest, GetNodes) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); const auto& nodes = graph_view.GetNodes(); const int num_nodes = nodes.size(); EXPECT_EQ(num_nodes, 4); ASSERT_EQ(num_nodes, graph.node_size()); for (int i = 0; i < num_nodes; ++i) { EXPECT_EQ(nodes[i].node(), graph.mutable_node(i)); } } TYPED_TEST(TypedGraphViewTest, HasNode) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); for (const string& node_name : {"a", "b", "c", "d"}) { EXPECT_TRUE(graph_view.HasNode(node_name)); } EXPECT_FALSE(graph_view.HasNode("e")); } TYPED_TEST(TypedGraphViewTest, NumNodes) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); EXPECT_EQ(graph_view.NumNodes(), 4); } TYPED_TEST(TypedGraphViewTest, NumNodesEmptyGraph) { GraphDef graph; Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); EXPECT_EQ(graph_view.NumNodes(), 0); } TEST(MutableGraphViewTest, DedupControlDependencies) { GraphDef graph = GDef( {NDef("a", kNoOp, {}), NDef("b", kNoOp, {}), NDef("c", kNoOp, {}), NDef("d", kNoOp, {"a:2", "b:1", "^c", "^c", "^a", "^a", "^b", "^c"})}, {}); Status s; MutableGraphView graph_view(&graph, &s); TF_ASSERT_OK(s); EXPECT_EQ(graph_view.NumNodes(), 4); const auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); const auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); const auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); const auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(d_node->NumRegularFanins(), 2); ASSERT_NE(d_node->node(), nullptr); ASSERT_EQ(d_node->node()->input_size(), 5); EXPECT_EQ(d_node->node()->input(0), "a:2"); EXPECT_EQ(d_node->node()->input(1), "b:1"); EXPECT_EQ(d_node->node()->input(2), "^c"); EXPECT_EQ(d_node->node()->input(3), "^b"); EXPECT_EQ(d_node->node()->input(4), "^a"); ASSERT_EQ(d_node->NumControllingFanins(), 3); const auto& d_control_fanins = d_node->GetControllingFanins(); ASSERT_EQ(d_control_fanins.size(), 3); ASSERT_NE(d_control_fanins[0].node_view(), nullptr); EXPECT_EQ(d_control_fanins[0].node_view()->GetName(), "c"); ASSERT_NE(d_control_fanins[1].node_view(), nullptr); EXPECT_EQ(d_control_fanins[1].node_view()->GetName(), "b"); ASSERT_NE(d_control_fanins[2].node_view(), nullptr); EXPECT_EQ(d_control_fanins[2].node_view()->GetName(), "a"); } template <typename T> class TypedNodeViewTest : public ::testing::Test {}; TYPED_TEST_SUITE(TypedNodeViewTest, GraphViewTypes); TYPED_TEST(TypedNodeViewTest, GetName) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); for (const NodeDef& node : graph.node()) { const auto* node_view = graph_view.GetNode(node.name()); ASSERT_NE(node_view, nullptr); EXPECT_EQ(node_view->GetName(), node.name()); EXPECT_EQ(node_view->GetName(), node_view->node()->name()); } } TYPED_TEST(TypedNodeViewTest, GetOp) { GraphDef graph = GDef({NDef("a", "op_a", {}), NDef("b", "op_b", {}), NDef("c", "op_c", {}), NDef("d", "op_d", {})}, {}); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); const auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); EXPECT_EQ(a_node->GetOp(), "op_a"); EXPECT_EQ(a_node->node()->op(), 
"op_a"); const auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); EXPECT_EQ(b_node->GetOp(), "op_b"); EXPECT_EQ(b_node->node()->op(), "op_b"); const auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_EQ(c_node->GetOp(), "op_c"); EXPECT_EQ(c_node->node()->op(), "op_c"); const auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(d_node->GetOp(), "op_d"); EXPECT_EQ(d_node->node()->op(), "op_d"); } TYPED_TEST(TypedNodeViewTest, GetDevice) { GraphDef graph = GDef( {NDef("a", "", {}, {}, "device_a"), NDef("b", "", {}, {}, "device_b"), NDef("c", "", {}, {}, "device_c"), NDef("d", "", {}, {})}, {}); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); const auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); EXPECT_EQ(a_node->GetDevice(), "device_a"); EXPECT_EQ(a_node->node()->device(), "device_a"); const auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); EXPECT_EQ(b_node->GetDevice(), "device_b"); EXPECT_EQ(b_node->node()->device(), "device_b"); const auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_EQ(c_node->GetDevice(), "device_c"); EXPECT_EQ(c_node->node()->device(), "device_c"); const auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(d_node->GetDevice(), ""); EXPECT_EQ(d_node->node()->device(), ""); } template <typename T> class TypedFaninTest : public ::testing::Test {}; using FaninTypes = ::testing::Types<std::pair<FanoutView, GraphView>, std::pair<MutableFanoutView, MutableGraphView>>; TYPED_TEST_SUITE(TypedFaninTest, FaninTypes); TYPED_TEST(TypedFaninTest, GetRegularFanins) { using FanoutViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& a_fanins = a_node->GetRegularFanins(); ASSERT_EQ(a_fanins.size(), 4); EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, b_node->node_index(), 2)); EXPECT_EQ(a_fanins[1], FanoutViewType(&graph_view, d_node->node_index(), 3)); EXPECT_EQ(a_fanins[2], FanoutViewType(&graph_view, b_node->node_index(), 2)); EXPECT_EQ(a_fanins[3], FanoutViewType(&graph_view, d_node->node_index(), 3)); const auto& d_fanins = d_node->GetRegularFanins(); EXPECT_EQ(d_fanins.size(), 0); } TYPED_TEST(TypedFaninTest, GetRegularFanin) { using FanoutViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& a_fanin_0 = a_node->GetRegularFanin(0); EXPECT_EQ(a_fanin_0, FanoutViewType(&graph_view, b_node->node_index(), 2)); const auto& a_fanin_1 = a_node->GetRegularFanin(1); EXPECT_EQ(a_fanin_1, FanoutViewType(&graph_view, d_node->node_index(), 3)); const auto& a_fanin_2 = a_node->GetRegularFanin(2); EXPECT_EQ(a_fanin_2, FanoutViewType(&graph_view, b_node->node_index(), 2)); const auto& a_fanin_3 = a_node->GetRegularFanin(3); EXPECT_EQ(a_fanin_3, FanoutViewType(&graph_view, d_node->node_index(), 3)); const FanoutViewType 
missing_fanin; EXPECT_EQ(missing_fanin, FanoutViewType(nullptr, -1, -2)); EXPECT_EQ(missing_fanin.node_view(), nullptr); const auto& a_fanin_4 = a_node->GetRegularFanin(4); EXPECT_EQ(a_fanin_4, missing_fanin); const auto& a_fanin_5 = a_node->GetRegularFanin(5); EXPECT_EQ(a_fanin_5, missing_fanin); const auto& a_fanin_control = a_node->GetRegularFanin(Graph::kControlSlot); EXPECT_EQ(a_fanin_control, missing_fanin); const auto& a_fanin_bad = a_node->GetRegularFanin(-2); EXPECT_EQ(a_fanin_bad, missing_fanin); } TYPED_TEST(TypedFaninTest, GetControllingFanins) { using FanoutViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& a_fanins = a_node->GetControllingFanins(); ASSERT_EQ(a_fanins.size(), 1); EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, c_node->node_index(), Graph::kControlSlot)); const auto& c_fanins = c_node->GetControllingFanins(); FanoutViewType d_control_fanin(&graph_view, d_node->node_index(), Graph::kControlSlot); if (std::is_same<GraphViewType, GraphView>::value) { ASSERT_EQ(c_fanins.size(), 2); EXPECT_EQ(c_fanins[0], d_control_fanin); EXPECT_EQ(c_fanins[1], d_control_fanin); } else { ASSERT_EQ(c_fanins.size(), 1); EXPECT_EQ(c_fanins[0], d_control_fanin); } const auto& d_fanins = d_node->GetControllingFanins(); EXPECT_EQ(d_fanins.size(), 0); } template <typename T> class TypedFanoutTest : public ::testing::Test {}; using FanoutTypes = ::testing::Types<std::pair<FaninView, GraphView>, std::pair<MutableFaninView, MutableGraphView>>; TYPED_TEST_SUITE(TypedFanoutTest, FanoutTypes); TYPED_TEST(TypedFanoutTest, GetRegularFanouts) { using FaninViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& d_fanouts = d_node->GetRegularFanouts(); ASSERT_EQ(d_fanouts.size(), 4); for (int i = 0; i < d_fanouts.size(); ++i) { if (i == 2) { ASSERT_EQ(d_fanouts[i].size(), 1); EXPECT_EQ(d_fanouts[i][0], FaninViewType(&graph_view, b_node->node_index(), 0)); } else if (i == 3) { ASSERT_EQ(d_fanouts[i].size(), 2); absl::flat_hash_set<FaninViewType> fanouts(d_fanouts[i].begin(), d_fanouts[i].end()); EXPECT_TRUE(fanouts.contains( FaninViewType(&graph_view, a_node->node_index(), 1))); EXPECT_TRUE(fanouts.contains( FaninViewType(&graph_view, a_node->node_index(), 3))); } else { EXPECT_EQ(d_fanouts[i].size(), 0); } } const auto& a_fanouts = a_node->GetRegularFanouts(); EXPECT_EQ(a_fanouts.size(), 0); } TYPED_TEST(TypedFanoutTest, GetRegularFanout) { using FaninViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& d_fanouts_2 = 
d_node->GetRegularFanout(2); ASSERT_EQ(d_fanouts_2.size(), 1); EXPECT_EQ(d_fanouts_2.at(0), FaninViewType(&graph_view, b_node->node_index(), 0)); const auto& d_fanouts_3 = d_node->GetRegularFanout(3); EXPECT_EQ(d_fanouts_3.size(), 2); absl::flat_hash_set<FaninViewType> d_fanouts_3_set(d_fanouts_3.begin(), d_fanouts_3.end()); EXPECT_TRUE(d_fanouts_3_set.contains( FaninViewType(&graph_view, a_node->node_index(), 1))); EXPECT_TRUE(d_fanouts_3_set.contains( FaninViewType(&graph_view, a_node->node_index(), 3))); const std::vector<FaninViewType> no_fanouts; EXPECT_EQ(d_node->GetRegularFanout(-2), no_fanouts); EXPECT_EQ(d_node->GetRegularFanout(Graph::kControlSlot), no_fanouts); EXPECT_EQ(d_node->GetRegularFanout(0), no_fanouts); EXPECT_EQ(d_node->GetRegularFanout(1), no_fanouts); EXPECT_EQ(d_node->GetRegularFanout(4), no_fanouts); EXPECT_EQ(d_node->GetRegularFanout(5), no_fanouts); } TYPED_TEST(TypedFanoutTest, GetControlledFanouts) { using FaninViewType = typename TypeParam::first_type; using GraphViewType = typename TypeParam::second_type; GraphDef graph = SimpleTestGraph(); Status s; GraphViewType graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); const auto& c_fanouts = c_node->GetControlledFanouts(); EXPECT_EQ(c_fanouts.size(), 2); absl::flat_hash_set<FaninViewType> c_fanouts_set(c_fanouts.begin(), c_fanouts.end()); EXPECT_TRUE(c_fanouts_set.contains( FaninViewType(&graph_view, b_node->node_index(), Graph::kControlSlot))); EXPECT_TRUE(c_fanouts_set.contains( FaninViewType(&graph_view, a_node->node_index(), Graph::kControlSlot))); const auto& d_fanouts = d_node->GetControlledFanouts(); FaninViewType c_control_fanout(&graph_view, c_node->node_index(), Graph::kControlSlot); if (std::is_same<GraphViewType, GraphView>::value) { ASSERT_EQ(d_fanouts.size(), 2); EXPECT_EQ(d_fanouts[0], c_control_fanout); EXPECT_EQ(d_fanouts[1], c_control_fanout); } else { ASSERT_EQ(d_fanouts.size(), 1); EXPECT_EQ(d_fanouts[0], c_control_fanout); } const auto& a_fanouts = a_node->GetControlledFanouts(); EXPECT_EQ(a_fanouts.size(), 0); } TYPED_TEST(TypedNodeViewTest, NumRegularFanins) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(a_node->NumRegularFanins(), 4); EXPECT_EQ(b_node->NumRegularFanins(), 2); EXPECT_EQ(c_node->NumRegularFanins(), 0); EXPECT_EQ(d_node->NumRegularFanins(), 0); } TYPED_TEST(TypedNodeViewTest, NumControllingFanins) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(a_node->NumControllingFanins(), 1); EXPECT_EQ(b_node->NumControllingFanins(), 1); if (std::is_same<TypeParam, GraphView>::value) { EXPECT_EQ(c_node->NumControllingFanins(), 2); } else { 
EXPECT_EQ(c_node->NumControllingFanins(), 1); } EXPECT_EQ(d_node->NumControllingFanins(), 0); } TYPED_TEST(TypedNodeViewTest, NumRegularFanouts) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(a_node->NumRegularFanouts(), 0); EXPECT_EQ(b_node->NumRegularFanouts(), 2); EXPECT_EQ(c_node->NumRegularFanouts(), 1); EXPECT_EQ(d_node->NumRegularFanouts(), 3); } TYPED_TEST(TypedNodeViewTest, NumControlledFanouts) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_EQ(a_node->NumControlledFanouts(), 0); EXPECT_EQ(b_node->NumControlledFanouts(), 0); EXPECT_EQ(c_node->NumControlledFanouts(), 2); if (std::is_same<TypeParam, GraphView>::value) { EXPECT_EQ(d_node->NumControlledFanouts(), 2); } else { EXPECT_EQ(d_node->NumControlledFanouts(), 1); } } TYPED_TEST(TypedNodeViewTest, HasFanin) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_TRUE(a_node->HasFanin({&graph_view, b_node->node_index(), 2})); EXPECT_FALSE(a_node->HasFanin({&graph_view, c_node->node_index(), 4})); EXPECT_TRUE(a_node->HasFanin( {&graph_view, c_node->node_index(), Graph::kControlSlot})); EXPECT_FALSE(a_node->HasFanin( {&graph_view, b_node->node_index(), Graph::kControlSlot})); EXPECT_FALSE(a_node->HasFanin({&graph_view, a_node->node_index(), 0})); EXPECT_FALSE(a_node->HasFanin( {&graph_view, b_node->node_index(), internal::kMissingSlot})); } TYPED_TEST(TypedNodeViewTest, HasFanout) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); auto* d_node = graph_view.GetNode("d"); ASSERT_NE(d_node, nullptr); EXPECT_TRUE(b_node->HasFanout({&graph_view, a_node->node_index(), 2})); EXPECT_FALSE(b_node->HasFanout({&graph_view, a_node->node_index(), 1})); EXPECT_TRUE(d_node->HasFanout( {&graph_view, c_node->node_index(), Graph::kControlSlot})); EXPECT_FALSE(d_node->HasFanout( {&graph_view, a_node->node_index(), Graph::kControlSlot})); EXPECT_FALSE(d_node->HasFanout({&graph_view, d_node->node_index(), 0})); EXPECT_FALSE(a_node->HasFanout({&graph_view, b_node->node_index(), 0})); EXPECT_FALSE(a_node->HasFanout({&graph_view, 4, 0})); EXPECT_FALSE(d_node->HasFanout( {&graph_view, b_node->node_index(), internal::kMissingSlot})); } GraphDef SimpleAttrTestGraph() { return GDef({NDef("a", kNoOp, {}), NDef("b", kNoOp, {}, {{"attr", 1}}), NDef("c", kNoOp, {}, {{"attr_1", "a"}, {"attr_2", 2.0f}})}, {}); } TYPED_TEST(TypedNodeViewTest, GetAttr) { GraphDef graph = SimpleAttrTestGraph(); Status s; TypeParam 
graph_view(&graph, &s); TF_ASSERT_OK(s); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_EQ(c_node->GetAttr("attr_1")->s(), "a"); } TYPED_TEST(TypedNodeViewTest, GetAttrs) { GraphDef graph = SimpleAttrTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); const auto& actual_attrs = c_node->GetAttrs(); EXPECT_EQ(actual_attrs.size(), 2); const auto* attr_1 = actual_attrs.Find("attr_1"); EXPECT_NE(attr_1, nullptr); EXPECT_EQ(attr_1->s(), "a"); const auto* attr_2 = actual_attrs.Find("attr_2"); EXPECT_NE(attr_2, nullptr); EXPECT_EQ(attr_2->f(), 2.0f); } TYPED_TEST(TypedNodeViewTest, NumAttrs) { GraphDef graph = SimpleAttrTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* a_node = graph_view.GetNode("a"); ASSERT_NE(a_node, nullptr); auto* b_node = graph_view.GetNode("b"); ASSERT_NE(b_node, nullptr); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_EQ(a_node->NumAttrs(), 0); EXPECT_EQ(b_node->NumAttrs(), 1); EXPECT_EQ(c_node->NumAttrs(), 2); } TYPED_TEST(TypedNodeViewTest, HasAttr) { GraphDef graph = SimpleAttrTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); auto* c_node = graph_view.GetNode("c"); ASSERT_NE(c_node, nullptr); EXPECT_TRUE(c_node->HasAttr("attr_1")); EXPECT_FALSE(c_node->HasAttr("attr")); } class CompareGraphTest : public GrapplerTest { public: void CompareGraphViewWithGraph(MutableGraphView* graph_view, const GraphDef& expected_graph) { Status s; GraphView expected_graph_view(&expected_graph, &s); TF_ASSERT_OK(s); EXPECT_EQ(graph_view->NumNodes(), expected_graph_view.NumNodes()); for (const NodeView& expected_node_view : expected_graph_view.GetNodes()) { const string& node_name = expected_node_view.GetName(); MutableNodeView* node_view = graph_view->GetNode(node_name); ASSERT_NE(node_view, nullptr); EXPECT_EQ(node_view->GetName(), expected_node_view.GetName()); EXPECT_EQ(node_view->GetOp(), expected_node_view.GetOp()); EXPECT_EQ(node_view->GetDevice(), expected_node_view.GetDevice()); const int actual_num_fanins = node_view->node()->input_size(); EXPECT_EQ(actual_num_fanins, expected_node_view.node()->input_size()); const int expected_num_regular_fanins = expected_node_view.NumRegularFanins(); bool same_num_regular_fanins = node_view->NumRegularFanins() == expected_num_regular_fanins; EXPECT_TRUE(same_num_regular_fanins); for (int i = 0; i < expected_num_regular_fanins; ++i) { const auto& expected_fanin = expected_node_view.GetRegularFanin(i); auto* actual_fanin_node = graph_view->GetNode(expected_fanin.node_view()->GetName()); ASSERT_NE(actual_fanin_node, nullptr); EXPECT_TRUE( node_view->HasFanin({actual_fanin_node, expected_fanin.index()})); if (i < node_view->NumRegularFanins()) { auto& actual_fanin = node_view->GetRegularFanin(i); EXPECT_EQ(actual_fanin, MutableFanoutView(actual_fanin_node, expected_fanin.index())); EXPECT_EQ(actual_fanin.node_index(), actual_fanin.node_view()->node_index()); } } if (same_num_regular_fanins) { for (int i = 0; i < expected_num_regular_fanins; ++i) { const auto& fanin = node_view->GetRegularFanin(i); EXPECT_EQ(ParseTensorName(node_view->node()->input(i)), TensorId(fanin.node_view()->GetName(), fanin.index())); } } const int expected_num_controlling_fanins = expected_node_view.NumControllingFanins(); bool same_num_controlling_fanins = node_view->NumControllingFanins() == expected_num_controlling_fanins; EXPECT_TRUE(same_num_controlling_fanins); for (int i 
= 0; i < expected_num_controlling_fanins; ++i) { auto& expected_fanin = expected_node_view.GetControllingFanins()[i]; auto* actual_fanin_node = graph_view->GetNode(expected_fanin.node_view()->GetName()); ASSERT_NE(actual_fanin_node, nullptr); MutableFanoutView actual_fanin(actual_fanin_node, expected_fanin.index()); EXPECT_TRUE(node_view->HasFanin(actual_fanin)); int found = 0; for (const auto& actual_fanin : node_view->GetControllingFanins()) { if (actual_fanin.index() == expected_fanin.index() && actual_fanin.node_view()->GetName() == expected_fanin.node_view()->GetName()) { EXPECT_EQ(actual_fanin.node_index(), actual_fanin.node_view()->node_index()); ++found; } } EXPECT_EQ(found, 1); } if (same_num_controlling_fanins && same_num_regular_fanins) { for (int i = 0; i < expected_num_controlling_fanins; ++i) { const auto& fanin = node_view->GetControllingFanins()[i]; EXPECT_EQ(ParseTensorName(node_view->node()->input( i + expected_num_regular_fanins)), TensorId(fanin.node_view()->GetName(), fanin.index())); } } EXPECT_EQ(node_view->NumRegularFanouts(), expected_node_view.NumRegularFanouts()); const int num_output_ports = expected_node_view.GetRegularFanouts().size(); ASSERT_EQ(node_view->GetRegularFanouts().size(), num_output_ports); for (int i = 0; i < num_output_ports; ++i) { auto& expected_fanouts_at_port_i = expected_node_view.GetRegularFanouts()[i]; const int num_fanouts_at_port = expected_fanouts_at_port_i.size(); auto& actual_fanouts_at_port_i = node_view->GetRegularFanouts()[i]; EXPECT_EQ(actual_fanouts_at_port_i.size(), num_fanouts_at_port); for (int j = 0; j < num_fanouts_at_port; ++j) { auto& expected_fanout = expected_fanouts_at_port_i[j]; auto* actual_fanout_node = graph_view->GetNode(expected_fanout.node_view()->GetName()); ASSERT_NE(actual_fanout_node, nullptr); MutableFaninView actual_fanout(actual_fanout_node, expected_fanout.index()); EXPECT_TRUE(node_view->HasFanout(actual_fanout)); int found = 0; for (const auto& fanout : actual_fanouts_at_port_i) { if (fanout.index() == expected_fanout.index() && fanout.node_view()->GetName() == expected_fanout.node_view()->GetName()) { EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index()); ++found; } } EXPECT_EQ(found, 1); } } const int num_controlled_fanouts = expected_node_view.NumControlledFanouts(); EXPECT_EQ(node_view->NumControlledFanouts(), num_controlled_fanouts); for (int i = 0; i < num_controlled_fanouts; ++i) { const auto& expected_fanout = expected_node_view.GetControlledFanouts()[i]; auto* actual_fanout_node = graph_view->GetNode(expected_fanout.node_view()->GetName()); ASSERT_NE(actual_fanout_node, nullptr); MutableFaninView actual_fanout(actual_fanout_node, expected_fanout.index()); EXPECT_TRUE(node_view->HasFanout(actual_fanout)); int found = 0; for (const auto& fanout : node_view->GetControlledFanouts()) { if (fanout.index() == expected_fanout.index() && fanout.node_view()->GetName() == expected_fanout.node_view()->GetName()) { EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index()); ++found; } } EXPECT_EQ(found, 1); } EXPECT_EQ(node_view->NumAttrs(), expected_node_view.NumAttrs()); for (const auto& expected_attr : expected_node_view.GetAttrs()) { auto* attr = node_view->GetAttr(expected_attr.first); EXPECT_TRUE(AreAttrValuesEqual(*attr, expected_attr.second)); }
#include "tensorflow/core/common_runtime/graph_view.h" #include <atomic> #include <deque> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/edgeset.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { string NodeItem::DebugString() const { string ret = strings::StrCat("{name:'", kernel->name(), "' id:", node_id); if (is_source) { strings::StrAppend(&ret, " source}"); } else { strings::StrAppend(&ret, " def:{", SummarizeNodeDef(kernel->def()), "}}"); }
TYPED_TEST(TypedNodeViewTest, GetName) { GraphDef graph = SimpleTestGraph(); Status s; TypeParam graph_view(&graph, &s); TF_ASSERT_OK(s); for (const NodeDef& node : graph.node()) { const auto* node_view = graph_view.GetNode(node.name()); ASSERT_NE(node_view, nullptr); EXPECT_EQ(node_view->GetName(), node.name()); EXPECT_EQ(node_view->GetName(), node_view->node()->name()); } }
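The GDef/NDef fanin strings used throughout the tests above encode both edge kinds: "b:2" is a regular fanin reading output port 2 of node b (plain "b" means port 0), while "^c" is a controlling fanin. The real parser is ParseTensorName; the hypothetical stand-in below only illustrates the convention, with port -1 playing the role of Graph::kControlSlot.

#include <cassert>
#include <string>
#include <utility>

// Hypothetical mini-parser for NDef fanin strings (illustration only).
// Returns {node_name, port}; port -1 marks a control dependency.
std::pair<std::string, int> ParseFanin(const std::string& s) {
  if (!s.empty() && s[0] == '^') return {s.substr(1), -1};
  const size_t colon = s.rfind(':');
  if (colon == std::string::npos) return {s, 0};
  return {s.substr(0, colon), std::stoi(s.substr(colon + 1))};
}

int main() {
  assert(ParseFanin("b:2") == std::make_pair(std::string("b"), 2));  // regular fanin, port 2
  assert(ParseFanin("^c") == std::make_pair(std::string("c"), -1));  // controlling fanin
  assert(ParseFanin("d") == std::make_pair(std::string("d"), 0));    // shorthand for d:0
}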
#include "arolla/expr/operator_loader/generic_operator_overload_condition.h" #include <cstdint> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "arolla/expr/eval/model_executor.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/tuple_expr_operator.h" #include "arolla/io/wildcard_input_loader.h" #include "arolla/memory/optional_value.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/tuple_qtype.h" #include "arolla/qtype/typed_value.h" #include "arolla/util/status_macros_backport.h" namespace arolla::operator_loader { using ::arolla::expr::BindOp; using ::arolla::expr::CompileModelExecutor; using ::arolla::expr::ExprNodePtr; using ::arolla::expr::MakeTupleOperator; using ::arolla::expr::ModelEvaluationOptions; absl::StatusOr<GenericOperatorOverloadConditionFn> MakeGenericOperatorOverloadConditionFn( absl::Span<const ExprNodePtr> prepared_condition_exprs) { ASSIGN_OR_RETURN(auto expr, BindOp(MakeTupleOperator::Make(), prepared_condition_exprs, {})); auto accessor = [](QTypePtr input_tuple_qtype, absl::string_view) { return input_tuple_qtype; }; ASSIGN_OR_RETURN(auto input_loader, WildcardInputLoader<QTypePtr>::Build(accessor)); ASSIGN_OR_RETURN(auto model_executor, CompileModelExecutor<TypedValue>( std::move(expr), *input_loader)); const auto test_input_qtype = MakeTupleQType({}); const auto expected_output_qtype = MakeTupleQType( std::vector(prepared_condition_exprs.size(), GetQType<OptionalUnit>())); ASSIGN_OR_RETURN( auto actual_output, model_executor.ExecuteOnHeap(ModelEvaluationOptions{}, test_input_qtype)); if (actual_output.GetType() != expected_output_qtype) { return absl::FailedPreconditionError(absl::StrFormat( "unexpected return qtype: expected %s, got %s", expected_output_qtype->name(), actual_output.GetType()->name())); } return [model_executor = std::move(model_executor)]( QTypePtr input_tuple_qtype) -> absl::StatusOr<std::vector<bool>> { ASSIGN_OR_RETURN(auto qvalue, model_executor.ExecuteOnHeap(ModelEvaluationOptions{}, input_tuple_qtype)); const int64_t n = qvalue.GetFieldCount(); std::vector<bool> result(n); for (int64_t i = 0; i < n; ++i) { result[i] = qvalue.GetField(i).UnsafeAs<OptionalUnit>().present; } return result; }; } }
#include "arolla/expr/operator_loader/generic_operator_overload_condition.h" #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/tuple_qtype.h" #include "arolla/util/init_arolla.h" #include "arolla/util/testing/status_matchers_backport.h" #include "arolla/util/unit.h" namespace arolla::operator_loader { namespace { using ::arolla::expr::CallOp; using ::arolla::expr::ExprNodePtr; using ::arolla::expr::Leaf; using ::arolla::expr::Literal; using ::arolla::testing::IsOkAndHolds; using ::arolla::testing::StatusIs; class GenericOperatorOverloadConditionTest : public ::testing::Test { protected: void SetUp() override { ASSERT_OK(InitArolla()); } static absl::StatusOr<ExprNodePtr> Arg(int n) { return CallOp("qtype.get_field_qtype", {Leaf("input_tuple_qtype"), Literal(n)}); } static absl::StatusOr<ExprNodePtr> Equal(absl::StatusOr<ExprNodePtr> lhs, absl::StatusOr<ExprNodePtr> rhs) { return CallOp("core.equal", {lhs, rhs}); } static absl::StatusOr<ExprNodePtr> NotEqual(absl::StatusOr<ExprNodePtr> lhs, absl::StatusOr<ExprNodePtr> rhs) { return CallOp("core.not_equal", {lhs, rhs}); } static absl::StatusOr<ExprNodePtr> And(absl::StatusOr<ExprNodePtr> lhs, absl::StatusOr<ExprNodePtr> rhs) { return CallOp("core.presence_and", {lhs, rhs}); } }; TEST_F(GenericOperatorOverloadConditionTest, Empty) { ASSERT_OK_AND_ASSIGN(auto condition_fn, MakeGenericOperatorOverloadConditionFn({})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector<bool>())); } TEST_F(GenericOperatorOverloadConditionTest, SingleCondition) { ASSERT_OK_AND_ASSIGN(auto condition_expr, NotEqual(Arg(0), Literal(GetNothingQType()))); ASSERT_OK_AND_ASSIGN( auto condition_fn, MakeGenericOperatorOverloadConditionFn({condition_expr})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector({false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})), IsOkAndHolds(std::vector({false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>()})), IsOkAndHolds(std::vector({true}))); } TEST_F(GenericOperatorOverloadConditionTest, MultipleConditions) { ASSERT_OK_AND_ASSIGN(auto condition_expr_1, And(And(NotEqual(Arg(0), Literal(GetNothingQType())), NotEqual(Arg(1), Literal(GetNothingQType()))), NotEqual(Arg(0), Arg(1)))); ASSERT_OK_AND_ASSIGN(auto condition_expr_2, And(And(NotEqual(Arg(0), Literal(GetNothingQType())), NotEqual(Arg(1), Literal(GetNothingQType()))), Equal(Arg(0), Arg(1)))); ASSERT_OK_AND_ASSIGN(auto condition_fn, MakeGenericOperatorOverloadConditionFn( {condition_expr_1, condition_expr_2})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector({false, false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})), IsOkAndHolds(std::vector({false, false}))); EXPECT_THAT( condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<Unit>()})), IsOkAndHolds(std::vector({false, true}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<int>()})), IsOkAndHolds(std::vector({true, false}))); } TEST_F(GenericOperatorOverloadConditionTest, UnexpectedReturnQType) { ASSERT_OK_AND_ASSIGN(auto condition_expr_1, NotEqual(Arg(0), Literal(GetNothingQType()))); ASSERT_OK_AND_ASSIGN(auto condition_expr_2, Arg(1)); EXPECT_THAT(MakeGenericOperatorOverloadConditionFn( {condition_expr_1, 
condition_expr_2}), StatusIs(absl::StatusCode::kFailedPrecondition, "unexpected return qtype: expected " "tuple<OPTIONAL_UNIT,OPTIONAL_UNIT>, got " "tuple<OPTIONAL_UNIT,QTYPE>")); } } }
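The tests above lean on one convention: each condition probes qtype.get_field_qtype(input_tuple_qtype, i), which evaluates to the NOTHING qtype when index i is out of range, so NotEqual(Arg(i), Literal(GetNothingQType())) reads as "argument i was supplied". The same sentinel idea over a plain vector, with hypothetical types:

#include <cassert>
#include <vector>

enum class QType { kNothing, kUnit, kInt };  // stand-ins, not arolla types

QType GetFieldQType(const std::vector<QType>& tuple_fields, int i) {
  return (i >= 0 && i < static_cast<int>(tuple_fields.size()))
             ? tuple_fields[i]
             : QType::kNothing;  // out of range -> NOTHING sentinel
}

int main() {
  const std::vector<QType> args = {QType::kUnit};
  assert(GetFieldQType(args, 0) != QType::kNothing);  // arg 0 present
  assert(GetFieldQType(args, 1) == QType::kNothing);  // arg 1 absent
}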
absl::StatusOr<GenericOperatorOverloadConditionFn> MakeGenericOperatorOverloadConditionFn( absl::Span<const ExprNodePtr> prepared_condition_exprs) { ASSIGN_OR_RETURN(auto expr, BindOp(MakeTupleOperator::Make(), prepared_condition_exprs, {})); auto accessor = [](QTypePtr input_tuple_qtype, absl::string_view) { return input_tuple_qtype; }; ASSIGN_OR_RETURN(auto input_loader, WildcardInputLoader<QTypePtr>::Build(accessor)); ASSIGN_OR_RETURN(auto model_executor, CompileModelExecutor<TypedValue>( std::move(expr), *input_loader)); const auto test_input_qtype = MakeTupleQType({}); const auto expected_output_qtype = MakeTupleQType( std::vector(prepared_condition_exprs.size(), GetQType<OptionalUnit>())); ASSIGN_OR_RETURN( auto actual_output, model_executor.ExecuteOnHeap(ModelEvaluationOptions{}, test_input_qtype)); if (actual_output.GetType() != expected_output_qtype) { return absl::FailedPreconditionError(absl::StrFormat( "unexpected return qtype: expected %s, got %s", expected_output_qtype->name(), actual_output.GetType()->name())); } return [model_executor = std::move(model_executor)]( QTypePtr input_tuple_qtype) -> absl::StatusOr<std::vector<bool>> { ASSIGN_OR_RETURN(auto qvalue, model_executor.ExecuteOnHeap(ModelEvaluationOptions{}, input_tuple_qtype)); const int64_t n = qvalue.GetFieldCount(); std::vector<bool> result(n); for (int64_t i = 0; i < n; ++i) { result[i] = qvalue.GetField(i).UnsafeAs<OptionalUnit>().present; } return result; }; }
TEST_F(GenericOperatorOverloadConditionTest, Empty) { ASSERT_OK_AND_ASSIGN(auto condition_fn, MakeGenericOperatorOverloadConditionFn({})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector<bool>())); } TEST_F(GenericOperatorOverloadConditionTest, SingleCondition) { ASSERT_OK_AND_ASSIGN(auto condition_expr, NotEqual(Arg(0), Literal(GetNothingQType()))); ASSERT_OK_AND_ASSIGN( auto condition_fn, MakeGenericOperatorOverloadConditionFn({condition_expr})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector({false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})), IsOkAndHolds(std::vector({false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>()})), IsOkAndHolds(std::vector({true}))); } TEST_F(GenericOperatorOverloadConditionTest, MultipleConditions) { ASSERT_OK_AND_ASSIGN(auto condition_expr_1, And(And(NotEqual(Arg(0), Literal(GetNothingQType())), NotEqual(Arg(1), Literal(GetNothingQType()))), NotEqual(Arg(0), Arg(1)))); ASSERT_OK_AND_ASSIGN(auto condition_expr_2, And(And(NotEqual(Arg(0), Literal(GetNothingQType())), NotEqual(Arg(1), Literal(GetNothingQType()))), Equal(Arg(0), Arg(1)))); ASSERT_OK_AND_ASSIGN(auto condition_fn, MakeGenericOperatorOverloadConditionFn( {condition_expr_1, condition_expr_2})); EXPECT_THAT(condition_fn(MakeTupleQType({})), IsOkAndHolds(std::vector({false, false}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})), IsOkAndHolds(std::vector({false, false}))); EXPECT_THAT( condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<Unit>()})), IsOkAndHolds(std::vector({false, true}))); EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<int>()})), IsOkAndHolds(std::vector({true, false}))); } TEST_F(GenericOperatorOverloadConditionTest, UnexpectedReturnQType) { ASSERT_OK_AND_ASSIGN(auto condition_expr_1, NotEqual(Arg(0), Literal(GetNothingQType()))); ASSERT_OK_AND_ASSIGN(auto condition_expr_2, Arg(1)); EXPECT_THAT(MakeGenericOperatorOverloadConditionFn( {condition_expr_1, condition_expr_2}), StatusIs(absl::StatusCode::kFailedPrecondition, "unexpected return qtype: expected " "tuple<OPTIONAL_UNIT,OPTIONAL_UNIT>, got " "tuple<OPTIONAL_UNIT,QTYPE>")); }
#include "tsl/platform/str_util.h" #include <cctype> #include <cstdint> #include <string> #include <vector> #include "absl/strings/ascii.h" #include "absl/strings/escaping.h" #include "absl/strings/match.h" #include "absl/strings/strip.h" #include "tsl/platform/logging.h" #include "tsl/platform/stringpiece.h" namespace tsl { namespace str_util { string CEscape(StringPiece src) { return absl::CEscape(src); } bool CUnescape(StringPiece source, string* dest, string* error) { return absl::CUnescape(source, dest, error); } void StripTrailingWhitespace(string* s) { absl::StripTrailingAsciiWhitespace(s); } size_t RemoveLeadingWhitespace(StringPiece* text) { absl::string_view new_text = absl::StripLeadingAsciiWhitespace(*text); size_t count = text->size() - new_text.size(); *text = new_text; return count; } size_t RemoveTrailingWhitespace(StringPiece* text) { absl::string_view new_text = absl::StripTrailingAsciiWhitespace(*text); size_t count = text->size() - new_text.size(); *text = new_text; return count; } size_t RemoveWhitespaceContext(StringPiece* text) { absl::string_view new_text = absl::StripAsciiWhitespace(*text); size_t count = text->size() - new_text.size(); *text = new_text; return count; } bool ConsumeLeadingDigits(StringPiece* s, uint64_t* val) { const char* p = s->data(); const char* limit = p + s->size(); uint64_t v = 0; while (p < limit) { const char c = *p; if (c < '0' || c > '9') break; uint64_t new_v = (v * 10) + (c - '0'); if (new_v / 8 < v) { return false; } v = new_v; p++; } if (p > s->data()) { s->remove_prefix(p - s->data()); *val = v; return true; } else { return false; } } bool ConsumeNonWhitespace(StringPiece* s, StringPiece* val) { const char* p = s->data(); const char* limit = p + s->size(); while (p < limit) { const char c = *p; if (isspace(c)) break; p++; } const size_t n = p - s->data(); if (n > 0) { *val = StringPiece(s->data(), n); s->remove_prefix(n); return true; } else { *val = StringPiece(); return false; } } bool ConsumePrefix(StringPiece* s, StringPiece expected) { return absl::ConsumePrefix(s, expected); } bool ConsumeSuffix(StringPiece* s, StringPiece expected) { return absl::ConsumeSuffix(s, expected); } StringPiece StripPrefix(StringPiece s, StringPiece expected) { return absl::StripPrefix(s, expected); } StringPiece StripSuffix(StringPiece s, StringPiece expected) { return absl::StripSuffix(s, expected); } string Lowercase(StringPiece s) { return absl::AsciiStrToLower(s); } string Uppercase(StringPiece s) { return absl::AsciiStrToUpper(s); } void TitlecaseString(string* s, StringPiece delimiters) { bool upper = true; for (string::iterator ss = s->begin(); ss != s->end(); ++ss) { if (upper) { *ss = toupper(*ss); } upper = (delimiters.find(*ss) != StringPiece::npos); } } string StringReplace(StringPiece s, StringPiece oldsub, StringPiece newsub, bool replace_all) { string res(s); size_t pos = 0; while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) { res.replace(pos, oldsub.size(), newsub.data(), newsub.size()); pos += newsub.size(); if (oldsub.empty()) { pos++; } if (!replace_all) { break; } } return res; } bool StartsWith(StringPiece text, StringPiece prefix) { return absl::StartsWith(text, prefix); } bool EndsWith(StringPiece text, StringPiece suffix) { return absl::EndsWith(text, suffix); } bool StrContains(StringPiece haystack, StringPiece needle) { return absl::StrContains(haystack, needle); } size_t Strnlen(const char* str, const size_t string_max_len) { size_t len = 0; while (len < string_max_len && str[len] != '\0') { 
++len; } return len; } string ArgDefCase(StringPiece s) { const size_t n = s.size(); size_t extra_us = 0; size_t to_skip = 0; for (size_t i = 0; i < n; ++i) { if (i == to_skip && !isalpha(s[i])) { ++to_skip; continue; } if (isupper(s[i]) && i != to_skip && i > 0 && isalnum(s[i - 1])) { ++extra_us; } } string result(n + extra_us - to_skip, '_'); for (size_t i = to_skip, j = 0; i < n; ++i, ++j) { DCHECK_LT(j, result.size()); char c = s[i]; if (isalnum(c)) { if (isupper(c)) { if (i != to_skip) { DCHECK_GT(j, 0); if (result[j - 1] != '_') ++j; } result[j] = tolower(c); } else { result[j] = c; } } } return result; } } }
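The "new_v / 8 < v" test in ConsumeLeadingDigits above is a deliberate overflow check: without wraparound, new_v = 10*v + d is at least 10*v, so new_v/8 >= v whenever v > 0; if the multiply wraps modulo 2^64, new_v collapses to a value far below 8*v, and dividing by 8 keeps the comparison a cheap shift. A standalone check of both branches:

#include <cassert>
#include <cstdint>

int main() {
  // Overflow case: v*10 exceeds 2^64 and wraps to a tiny value.
  uint64_t v = UINT64_MAX / 10 + 1;
  uint64_t new_v = v * 10;  // well-defined unsigned wraparound
  assert(new_v / 8 < v);    // overflow detected

  // Normal case: accumulating another digit keeps new_v/8 >= v.
  uint64_t w = 12345;
  assert((w * 10 + 9) / 8 >= w);
}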
#include "tsl/platform/str_util.h" #include <vector> #include "tsl/platform/test.h" namespace tsl { TEST(CEscape, Basic) { EXPECT_EQ(str_util::CEscape("hello"), "hello"); EXPECT_EQ(str_util::CEscape("hello\n"), "hello\\n"); EXPECT_EQ(str_util::CEscape("hello\r"), "hello\\r"); EXPECT_EQ(str_util::CEscape("\t\r\"'"), "\\t\\r\\\"\\'"); EXPECT_EQ(str_util::CEscape("\320hi\200"), "\\320hi\\200"); } string ExpectCUnescapeSuccess(StringPiece source) { string dest; string error; EXPECT_TRUE(str_util::CUnescape(source, &dest, &error)) << error; return dest; } TEST(CUnescape, Basic) { EXPECT_EQ("hello", ExpectCUnescapeSuccess("hello")); EXPECT_EQ("hello\n", ExpectCUnescapeSuccess("hello\\n")); EXPECT_EQ("hello\r", ExpectCUnescapeSuccess("hello\\r")); EXPECT_EQ("\t\r\"'", ExpectCUnescapeSuccess("\\t\\r\\\"\\'")); EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200")); } TEST(CUnescape, HandlesCopyOnWriteStrings) { string dest = "hello"; string read = dest; string error; StringPiece source = "llohe"; EXPECT_TRUE(str_util::CUnescape(source, &dest, &error)); EXPECT_EQ("hello", read); } TEST(StripTrailingWhitespace, Basic) { string test; test = "hello"; str_util::StripTrailingWhitespace(&test); EXPECT_EQ(test, "hello"); test = "foo "; str_util::StripTrailingWhitespace(&test); EXPECT_EQ(test, "foo"); test = " "; str_util::StripTrailingWhitespace(&test); EXPECT_EQ(test, ""); test = ""; str_util::StripTrailingWhitespace(&test); EXPECT_EQ(test, ""); test = " abc\t"; str_util::StripTrailingWhitespace(&test); EXPECT_EQ(test, " abc"); } TEST(RemoveLeadingWhitespace, Basic) { string text = " \t \n \r Quick\t"; StringPiece data(text); EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 11); EXPECT_EQ(data, StringPiece("Quick\t")); EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0); EXPECT_EQ(data, StringPiece("Quick\t")); } TEST(RemoveLeadingWhitespace, TerminationHandling) { string text = "\t"; StringPiece data(text); EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 1); EXPECT_EQ(data, StringPiece("")); EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0); EXPECT_EQ(data, StringPiece("")); } TEST(RemoveTrailingWhitespace, Basic) { string text = " \t \n \r Quick \t"; StringPiece data(text); EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 2); EXPECT_EQ(data, StringPiece(" \t \n \r Quick")); EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0); EXPECT_EQ(data, StringPiece(" \t \n \r Quick")); } TEST(RemoveTrailingWhitespace, TerminationHandling) { string text = "\t"; StringPiece data(text); EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 1); EXPECT_EQ(data, StringPiece("")); EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0); EXPECT_EQ(data, StringPiece("")); } TEST(RemoveWhitespaceContext, Basic) { string text = " \t \n \r Quick \t"; StringPiece data(text); EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 13); EXPECT_EQ(data, StringPiece("Quick")); EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0); EXPECT_EQ(data, StringPiece("Quick")); text = ""; data = text; EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0); EXPECT_EQ(data, StringPiece("")); } void TestConsumeLeadingDigits(StringPiece s, int64_t expected, StringPiece remaining) { uint64 v; StringPiece input(s); if (str_util::ConsumeLeadingDigits(&input, &v)) { EXPECT_EQ(v, static_cast<uint64>(expected)); EXPECT_EQ(input, remaining); } else { EXPECT_LT(expected, 0); EXPECT_EQ(input, remaining); } } TEST(ConsumeLeadingDigits, Basic) { using str_util::ConsumeLeadingDigits; TestConsumeLeadingDigits("123", 123, ""); 
TestConsumeLeadingDigits("a123", -1, "a123"); TestConsumeLeadingDigits("9_", 9, "_"); TestConsumeLeadingDigits("11111111111xyz", 11111111111ll, "xyz"); TestConsumeLeadingDigits("1111111111111111111111111111111xyz", -1, "1111111111111111111111111111111xyz"); TestConsumeLeadingDigits("18446744073709551616xyz", -1, "18446744073709551616xyz"); TestConsumeLeadingDigits("18446744073709551615xyz", 18446744073709551615ull, "xyz"); TestConsumeLeadingDigits("184467440737095516159yz", -1, "184467440737095516159yz"); } void TestConsumeNonWhitespace(StringPiece s, StringPiece expected, StringPiece remaining) { StringPiece v; StringPiece input(s); if (str_util::ConsumeNonWhitespace(&input, &v)) { EXPECT_EQ(v, expected); EXPECT_EQ(input, remaining); } else { EXPECT_EQ(expected, ""); EXPECT_EQ(input, remaining); } } TEST(ConsumeNonWhitespace, Basic) { TestConsumeNonWhitespace("", "", ""); TestConsumeNonWhitespace(" ", "", " "); TestConsumeNonWhitespace("abc", "abc", ""); TestConsumeNonWhitespace("abc ", "abc", " "); } TEST(ConsumePrefix, Basic) { string s("abcdef"); StringPiece input(s); EXPECT_FALSE(str_util::ConsumePrefix(&input, "abcdefg")); EXPECT_EQ(input, "abcdef"); EXPECT_FALSE(str_util::ConsumePrefix(&input, "abce")); EXPECT_EQ(input, "abcdef"); EXPECT_TRUE(str_util::ConsumePrefix(&input, "")); EXPECT_EQ(input, "abcdef"); EXPECT_FALSE(str_util::ConsumePrefix(&input, "abcdeg")); EXPECT_EQ(input, "abcdef"); EXPECT_TRUE(str_util::ConsumePrefix(&input, "abcdef")); EXPECT_EQ(input, ""); input = s; EXPECT_TRUE(str_util::ConsumePrefix(&input, "abcde")); EXPECT_EQ(input, "f"); } TEST(StripPrefix, Basic) { EXPECT_EQ(str_util::StripPrefix("abcdef", "abcdefg"), "abcdef"); EXPECT_EQ(str_util::StripPrefix("abcdef", "abce"), "abcdef"); EXPECT_EQ(str_util::StripPrefix("abcdef", ""), "abcdef"); EXPECT_EQ(str_util::StripPrefix("abcdef", "abcdeg"), "abcdef"); EXPECT_EQ(str_util::StripPrefix("abcdef", "abcdef"), ""); EXPECT_EQ(str_util::StripPrefix("abcdef", "abcde"), "f"); } TEST(JoinStrings, Basic) { std::vector<string> s; s = {"hi"}; EXPECT_EQ(str_util::Join(s, " "), "hi"); s = {"hi", "there", "strings"}; EXPECT_EQ(str_util::Join(s, " "), "hi there strings"); std::vector<StringPiece> sp; sp = {"hi"}; EXPECT_EQ(str_util::Join(sp, ",,"), "hi"); sp = {"hi", "there", "strings"}; EXPECT_EQ(str_util::Join(sp, "--"), "hi--there--strings"); } TEST(JoinStrings, Join3) { std::vector<string> s; s = {"hi"}; auto l1 = [](string* out, string s) { *out += s; }; EXPECT_EQ(str_util::Join(s, " ", l1), "hi"); s = {"hi", "there", "strings"}; auto l2 = [](string* out, string s) { *out += s[0]; }; EXPECT_EQ(str_util::Join(s, " ", l2), "h t s"); } TEST(Split, Basic) { EXPECT_TRUE(str_util::Split("", ',').empty()); EXPECT_EQ(str_util::Join(str_util::Split("a", ','), "|"), "a"); EXPECT_EQ(str_util::Join(str_util::Split(",", ','), "|"), "|"); EXPECT_EQ(str_util::Join(str_util::Split("a,b,c", ','), "|"), "a|b|c"); EXPECT_EQ(str_util::Join(str_util::Split("a,,,b,,c,", ','), "|"), "a|||b||c|"); EXPECT_EQ(str_util::Join(str_util::Split("a!,!b,!c,", ",!"), "|"), "a|||b||c|"); EXPECT_EQ(str_util::Join( str_util::Split("a,,,b,,c,", ',', str_util::SkipEmpty()), "|"), "a|b|c"); EXPECT_EQ( str_util::Join( str_util::Split("a, ,b,,c,", ',', str_util::SkipWhitespace()), "|"), "a|b|c"); EXPECT_EQ(str_util::Join(str_util::Split("a. 
!b,;c,", ".,;!", str_util::SkipWhitespace()), "|"), "a|b|c"); } TEST(Lowercase, Basic) { EXPECT_EQ("", str_util::Lowercase("")); EXPECT_EQ("hello", str_util::Lowercase("hello")); EXPECT_EQ("hello world", str_util::Lowercase("Hello World")); } TEST(Uppercase, Basic) { EXPECT_EQ("", str_util::Uppercase("")); EXPECT_EQ("HELLO", str_util::Uppercase("hello")); EXPECT_EQ("HELLO WORLD", str_util::Uppercase("Hello World")); } TEST(SnakeCase, Basic) { EXPECT_EQ("", str_util::ArgDefCase("")); EXPECT_EQ("", str_util::ArgDefCase("!")); EXPECT_EQ("", str_util::ArgDefCase("5")); EXPECT_EQ("", str_util::ArgDefCase("!:")); EXPECT_EQ("", str_util::ArgDefCase("5-5")); EXPECT_EQ("", str_util::ArgDefCase("_!")); EXPECT_EQ("", str_util::ArgDefCase("_5")); EXPECT_EQ("a", str_util::ArgDefCase("_a")); EXPECT_EQ("a", str_util::ArgDefCase("_A")); EXPECT_EQ("i", str_util::ArgDefCase("I")); EXPECT_EQ("i", str_util::ArgDefCase("i")); EXPECT_EQ("i_", str_util::ArgDefCase("I%")); EXPECT_EQ("i_", str_util::ArgDefCase("i%")); EXPECT_EQ("i", str_util::ArgDefCase("%I")); EXPECT_EQ("i", str_util::ArgDefCase("-i")); EXPECT_EQ("i", str_util::ArgDefCase("3i")); EXPECT_EQ("i", str_util::ArgDefCase("32i")); EXPECT_EQ("i3", str_util::ArgDefCase("i3")); EXPECT_EQ("i_a3", str_util::ArgDefCase("i_A3")); EXPECT_EQ("i_i", str_util::ArgDefCase("II")); EXPECT_EQ("i_i", str_util::ArgDefCase("I_I")); EXPECT_EQ("i__i", str_util::ArgDefCase("I__I")); EXPECT_EQ("i_i_32", str_util::ArgDefCase("II-32")); EXPECT_EQ("ii_32", str_util::ArgDefCase("Ii-32")); EXPECT_EQ("hi_there", str_util::ArgDefCase("HiThere")); EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi!Hi")); EXPECT_EQ("hi_hi", str_util::ArgDefCase("HiHi")); EXPECT_EQ("hihi", str_util::ArgDefCase("Hihi")); EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi_Hi")); } TEST(TitlecaseString, Basic) { string s = "sparse_lookup"; str_util::TitlecaseString(&s, "_"); ASSERT_EQ(s, "Sparse_Lookup"); s = "sparse_lookup"; str_util::TitlecaseString(&s, " "); ASSERT_EQ(s, "Sparse_lookup"); s = "dense"; str_util::TitlecaseString(&s, " "); ASSERT_EQ(s, "Dense"); } TEST(StringReplace, Basic) { EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ", true)); } TEST(StringReplace, OnlyFirst) { EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ", false)); } TEST(StringReplace, IncreaseLength) { EXPECT_EQ("a b c", str_util::StringReplace("abc", "b", " b ", true)); } TEST(StringReplace, IncreaseLengthMultipleMatches) { EXPECT_EQ("a b b c", str_util::StringReplace("abbc", "b", " b ", true)); } TEST(StringReplace, NoChange) { EXPECT_EQ("abc", str_util::StringReplace("abc", "d", "X", true)); } TEST(StringReplace, EmptyStringReplaceFirst) { EXPECT_EQ("", str_util::StringReplace("", "a", "X", false)); } TEST(StringReplace, EmptyStringReplaceAll) { EXPECT_EQ("", str_util::StringReplace("", "a", "X", true)); } TEST(Strnlen, Basic) { EXPECT_EQ(0, str_util::Strnlen("ab", 0)); EXPECT_EQ(1, str_util::Strnlen("a", 1)); EXPECT_EQ(2, str_util::Strnlen("abcd", 2)); EXPECT_EQ(3, str_util::Strnlen("abc", 10)); EXPECT_EQ(4, str_util::Strnlen("a \t\n", 10)); } }
string StringReplace(StringPiece s, StringPiece oldsub, StringPiece newsub, bool replace_all) { string res(s); size_t pos = 0; while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) { res.replace(pos, oldsub.size(), newsub.data(), newsub.size()); pos += newsub.size(); if (oldsub.empty()) { pos++; } if (!replace_all) { break; } } return res; }
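One subtlety in StringReplace above: std::string::find("") matches at every position, so without the "if (oldsub.empty()) pos++;" step the replace-all loop would never terminate. A standalone copy of the loop showing the effect (std::string in place of StringPiece):

#include <cassert>
#include <string>

std::string Replace(std::string res, const std::string& oldsub,
                    const std::string& newsub, bool replace_all) {
  size_t pos = 0;
  while ((pos = res.find(oldsub, pos)) != std::string::npos) {
    res.replace(pos, oldsub.size(), newsub);
    pos += newsub.size();
    if (oldsub.empty()) pos++;  // step past the zero-width match
    if (!replace_all) break;
  }
  return res;
}

int main() {
  assert(Replace("ab", "", "-", true) == "-a-b-");        // terminates, interleaves
  assert(Replace("abbc", "b", " b ", true) == "a b  b c");  // adjacent matches
  assert(Replace("ABC_ABC", "ABC", "XYZ", false) == "XYZ_ABC");
}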
TEST(StringReplace, Basic) { EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ", true)); } TEST(StringReplace, OnlyFirst) { EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ", false)); } TEST(StringReplace, IncreaseLength) { EXPECT_EQ("a b c", str_util::StringReplace("abc", "b", " b ", true)); } TEST(StringReplace, IncreaseLengthMultipleMatches) { EXPECT_EQ("a b  b c", str_util::StringReplace("abbc", "b", " b ", true)); } TEST(StringReplace, NoChange) { EXPECT_EQ("abc", str_util::StringReplace("abc", "d", "X", true)); } TEST(StringReplace, EmptyStringReplaceFirst) { EXPECT_EQ("", str_util::StringReplace("", "a", "X", false)); } TEST(StringReplace, EmptyStringReplaceAll) { EXPECT_EQ("", str_util::StringReplace("", "a", "X", true)); }
#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ #define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ #include <cassert> #include <cstddef> #include <memory> #include <type_traits> #include <utility> #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template <class Reference, class Policy> struct node_slot_policy { static_assert(std::is_lvalue_reference<Reference>::value, ""); using slot_type = typename std::remove_cv< typename std::remove_reference<Reference>::type>::type*; template <class Alloc, class... Args> static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { *slot = Policy::new_element(alloc, std::forward<Args>(args)...); } template <class Alloc> static void destroy(Alloc* alloc, slot_type* slot) { Policy::delete_element(alloc, *slot); } template <class Alloc> static std::true_type transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { *new_slot = *old_slot; return {}; } static size_t space_used(const slot_type* slot) { if (slot == nullptr) return Policy::element_space_used(nullptr); return Policy::element_space_used(*slot); } static Reference element(slot_type* slot) { return **slot; } template <class T, class P = Policy> static auto value(T* elem) -> decltype(P::value(elem)) { return P::value(elem); } template <class... Ts, class P = Policy> static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) { return P::apply(std::forward<Ts>(ts)...); } }; } ABSL_NAMESPACE_END } #endif
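The key type computation in node_slot_policy above is that the slot stores a pointer to the element, derived from the Reference parameter. A quick compile-time check of that derivation, using the same standard traits the header uses:

#include <type_traits>

// For node_slot_policy<int&, Policy>, slot_type works out to int*:
using Slot = std::remove_cv<std::remove_reference<int&>::type>::type*;
static_assert(std::is_same<Slot, int*>::value,
              "a node slot holds a pointer to the heap-allocated element");

int main() {}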
#include "absl/container/internal/node_slot_policy.h" #include <memory> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" #include "absl/container/internal/hash_policy_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::testing::Pointee; struct Policy : node_slot_policy<int&, Policy> { using key_type = int; using init_type = int; template <class Alloc> static int* new_element(Alloc* alloc, int value) { return new int(value); } template <class Alloc> static void delete_element(Alloc* alloc, int* elem) { delete elem; } }; using NodePolicy = hash_policy_traits<Policy>; struct NodeTest : ::testing::Test { std::allocator<int> alloc; int n = 53; int* a = &n; }; TEST_F(NodeTest, ConstructDestroy) { NodePolicy::construct(&alloc, &a, 42); EXPECT_THAT(a, Pointee(42)); NodePolicy::destroy(&alloc, &a); } TEST_F(NodeTest, transfer) { int s = 42; int* b = &s; NodePolicy::transfer(&alloc, &a, &b); EXPECT_EQ(&s, a); EXPECT_TRUE(NodePolicy::transfer_uses_memcpy()); } } } ABSL_NAMESPACE_END }
template <class Alloc>
static void destroy(Alloc* alloc, slot_type* slot) {
  Policy::delete_element(alloc, *slot);
}
TEST_F(NodeTest, ConstructDestroy) {
  NodePolicy::construct(&alloc, &a, 42);
  EXPECT_THAT(a, Pointee(42));
  NodePolicy::destroy(&alloc, &a);
}
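For context on what a node_slot_policy client must supply, here is a minimal sketch of a second policy, for heap-allocated strings. `StringPolicy` is my own illustrative name, not part of Abseil; it assumes only the two hooks (`new_element`/`delete_element`) that `construct`/`destroy` above dispatch to.

#include <cassert>
#include <memory>
#include <string>
#include <utility>

#include "absl/container/internal/node_slot_policy.h"

// Hypothetical policy: elements live on the heap, slots are raw pointers.
struct StringPolicy
    : absl::container_internal::node_slot_policy<std::string&, StringPolicy> {
  using key_type = std::string;
  using init_type = std::string;

  template <class Alloc, class... Args>
  static std::string* new_element(Alloc*, Args&&... args) {
    return new std::string(std::forward<Args>(args)...);
  }

  template <class Alloc>
  static void delete_element(Alloc*, std::string* s) {
    delete s;
  }
};

int main() {
  std::allocator<std::string> alloc;
  std::string* slot = nullptr;
  StringPolicy::construct(&alloc, &slot, "hello");   // slot now owns a node
  assert(StringPolicy::element(&slot) == "hello");   // element() is **slot
  StringPolicy::destroy(&alloc, &slot);              // deletes the node
  return 0;
}

Note the design point the transfer test above exercises: because a slot is just a pointer, node_slot_policy::transfer is a pointer copy and returns std::true_type, which is how hash_policy_traits::transfer_uses_memcpy() can report that relocating slots is memcpy-safe.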
#include "xla/service/gpu/dynamic_slice_fusion_rewriter.h" #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/ffi/api/c_api.h" #include "xla/ffi/ffi_api.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/gpu_constants.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using DefUseDataflowPath = absl::InlinedVector<HloInstruction*, 2>; using DefUseDataflowPaths = absl::InlinedVector<DefUseDataflowPath, 4>; using UseDefDataflowPath = absl::InlinedVector<HloInstruction*, 4>; using UseDefDataflowPaths = absl::InlinedVector<HloInstruction*, 8>; using DataflowPathView = absl::Span<HloInstruction* const>; using DataflowPathsView = absl::Span<DataflowPathView>; using InstructionSet = absl::flat_hash_set<HloInstruction*>; bool IsNoOp(const HloInstruction* hlo) { return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple, HloOpcode::kGetTupleElement>(hlo); } bool IsCustomCall(const HloInstruction* hlo, absl::string_view platform_name) { auto* custom_call = DynCast<HloCustomCallInstruction>(hlo); if (custom_call == nullptr) return false; if (custom_call->shape().IsTuple() && absl::c_any_of( custom_call->shape().tuple_shapes(), [&](const Shape& sub_shape) { return sub_shape.IsToken(); })) return false; const std::string call_target_name = custom_call->custom_call_target(); bool is_ffi_custom_call = custom_call->api_version() == CustomCallApiVersion::API_VERSION_TYPED_FFI; void* call_target = CustomCallTargetRegistry::Global()->Lookup( call_target_name, std::string(platform_name)); absl::StatusOr<ffi::HandlerRegistration> handler_registration = ffi::FindHandler(call_target_name, platform_name); bool found_custom_call = !is_ffi_custom_call && call_target != nullptr; bool found_ffi_handler = is_ffi_custom_call && handler_registration.ok(); return found_custom_call || found_ffi_handler; } bool IsAlignedSlice(const Shape& src_shape, const Shape& dst_shape, const HloSliceInstruction* slice) { if (!IsContiguousSlice(src_shape, dst_shape)) return false; auto strides = ShapeUtil::ByteStrides(dst_shape); if (!strides.has_value()) return false; for (auto dim : dst_shape.layout().minor_to_major()) { if ((strides.value()[dim] % kXlaAllocatedBufferAlignBytes) == 0) return true; if (dst_shape.dimensions(dim) < src_shape.dimensions(dim)) { return (slice != nullptr && ((strides.value()[dim] * slice->slice_starts(dim)) % kXlaAllocatedBufferAlignBytes == 0)); } } return true; } UseDefDataflowPaths GetSlicedOperandPaths(const HloInstruction* instr) { UseDefDataflowPaths sliced_operand_paths; InstructionSet processed_instrs; const auto& 
aliasing_pairs = Cast<HloCustomCallInstruction>(instr)->output_to_operand_aliasing(); absl::flat_hash_set<int64_t> aliased_operands; for (const auto& pair : aliasing_pairs) { aliased_operands.insert(pair.second.first); } for (const auto* operand : instr->operands()) { if (aliased_operands.contains(instr->operand_index(operand))) continue; UseDefDataflowPath maybe_sliced_operand_path; bool slice_found = false; auto maybe_slice_instr = HloFindIf({operand}, [&](const HloInstruction* cur) { if (processed_instrs.contains(cur)) return true; maybe_sliced_operand_path.push_back(const_cast<HloInstruction*>(cur)); if (IsOpcodeAnyOf<HloOpcode::kDynamicSlice, HloOpcode::kSlice>(cur)) { if (IsAlignedSlice(cur->operand(0)->shape(), cur->shape(), DynCast<HloSliceInstruction>(cur))) { slice_found = true; return slice_found; } } return !IsNoOp(cur); }); if (maybe_slice_instr == std::nullopt) continue; if (slice_found || processed_instrs.contains(maybe_slice_instr.value())) { sliced_operand_paths.insert(sliced_operand_paths.end(), maybe_sliced_operand_path.rbegin(), maybe_sliced_operand_path.rend()); processed_instrs.insert(maybe_sliced_operand_path.begin(), maybe_sliced_operand_path.end()); } } sliced_operand_paths.push_back(const_cast<HloInstruction*>(instr)); return sliced_operand_paths; } DefUseDataflowPaths GetSlicedUserPaths(const HloInstruction* instr) { DefUseDataflowPaths sliced_user_paths; InstructionSet processed_instrs; auto traverse_hlo_and_collect = [&](HloInstruction* start) { DefUseDataflowPath maybe_sliced_user_path; bool dus_found = false; auto maybe_dus_instr = HloFindIf( {start}, [&](const HloInstruction* cur) { if (processed_instrs.contains(cur)) return true; maybe_sliced_user_path.push_back(const_cast<HloInstruction*>(cur)); if (const auto slice_instr = DynCast<HloDynamicUpdateSliceInstruction>(cur)) { if (IsAlignedSlice(slice_instr->shape(), slice_instr->update()->shape(), nullptr)) { dus_found = true; return true; } } return cur->user_count() > 1 || !IsNoOp(cur); }, false); if (maybe_dus_instr == std::nullopt) return; if (dus_found || processed_instrs.contains(maybe_dus_instr.value())) { processed_instrs.insert(maybe_sliced_user_path.begin(), maybe_sliced_user_path.end()); sliced_user_paths.push_back(std::move(maybe_sliced_user_path)); } }; if (instr->shape().IsTuple()) { for (auto* user : instr->users()) { if (DynCast<HloGetTupleElementInstruction>(user)) { traverse_hlo_and_collect(user); } } } else { if (instr->user_count() == 1) { traverse_hlo_and_collect(instr->users().front()); } } return sliced_user_paths; } absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures( DataflowPathView matches) { absl::InlinedVector<HloInstruction*, 4> captures; InstructionSet matched_instrs(matches.begin(), matches.end()); for (HloInstruction* instr : matches) { for (HloInstruction* operand : instr->operands()) { if (!matched_instrs.contains(operand) && absl::c_find(captures, operand) == captures.end()) { captures.emplace_back(operand); } } } return captures; } absl::Status CreateRootTuple( HloInstruction* hero, HloComputation::Builder& builder, DataflowPathsView sliced_user_paths, absl::flat_hash_map<const HloInstruction*, HloInstruction*>& instr_mapping) { unsigned tuple_size = hero->shape().tuple_shapes_size(); std::vector<HloInstruction*> sliced_elems(tuple_size, nullptr); for (auto& sliced_user_path : sliced_user_paths) { auto gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front()); sliced_elems[gte->tuple_index()] = sliced_user_path.back(); } std::vector<HloInstruction*> 
elements; for (size_t i = 0; i < tuple_size; ++i) { if (sliced_elems[i] != nullptr) { elements.push_back(instr_mapping[sliced_elems[i]]); continue; } auto* gte = builder.AddInstruction( HloInstruction::CreateGetTupleElement(instr_mapping[hero], i)); if (hero->shape().tuple_shapes(i).IsTuple()) { instr_mapping[gte] = gte; TF_RETURN_IF_ERROR(CreateRootTuple(gte, builder, {}, instr_mapping)); elements.push_back(builder.last_added_instruction()); } else { elements.push_back(gte); } } if (elements.size() > 1) builder.AddInstruction(HloInstruction::CreateTuple(elements)); return absl::OkStatus(); } absl::StatusOr<HloComputation*> CreateFusionBody( HloModule* module, DataflowPathView sliced_operand_paths, DataflowPathsView sliced_user_paths, DataflowPathView captures) { HloComputation::Builder builder("address-computation"); absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping; auto mapped_operands = [&](HloInstruction* instr) { absl::InlinedVector<HloInstruction*, 4> operands; for (HloInstruction* operand : instr->operands()) { operands.push_back(instr_mapping.at(operand)); } return operands; }; for (const HloInstruction* capture : captures) { int64_t index = instr_mapping.size(); instr_mapping[capture] = builder.AddInstruction(HloInstruction::CreateParameter( index, capture->shape(), absl::StrCat("p", index))); } HloInstruction* hero; for (HloInstruction* instr : sliced_operand_paths) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); hero = instr; } for (auto& sliced_user_path : sliced_user_paths) { for (HloInstruction* instr : sliced_user_path) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); } } if (hero->shape().IsTuple() && hero->shape().tuple_shapes_size() > 0) { TF_RETURN_IF_ERROR( CreateRootTuple(hero, builder, sliced_user_paths, instr_mapping)); } return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false); } absl::StatusOr<HloInstruction*> CreateFusionInstruction( HloModule* module, HloInstruction* orig, DataflowPathView captures, HloComputation* body, bool dynamic) { HloComputation* parent = orig->parent(); HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion( body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, captures, body)); module->SetAndUniquifyInstrName(fusion, "address_computation"); GpuBackendConfig gpu_config; FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind("__custom_fusion"); CustomFusionConfig config; config.set_name(dynamic ? 
"dynamic_address_computation" : "address_computation"); *backend_config.mutable_custom_fusion_config() = config; TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config))); return fusion; } } absl::StatusOr<bool> DynamicSliceFusionRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { absl::flat_hash_map<HloInstruction*, std::pair<UseDefDataflowPaths, DefUseDataflowPaths>> matches; for (HloComputation* computation : module->computations()) { if (computation->IsFusionComputation()) continue; for (HloInstruction* instr : computation->instructions()) { if (IsLegacyCublasMatmul(*instr) || (IsCustomCall(instr, platform_name_))) { UseDefDataflowPaths sliced_operand_paths = GetSlicedOperandPaths(instr); bool has_sliced_operand_paths = sliced_operand_paths.size() > 1; DefUseDataflowPaths sliced_user_paths = GetSlicedUserPaths(instr); bool has_sliced_user_paths = absl::c_any_of( sliced_user_paths, [&](auto& sliced_user_path) { return !sliced_user_path.empty(); }); if (absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) { return DynCast<HloDynamicUpdateSliceInstruction>( sliced_user_path.back()) == nullptr; })) { return absl::InternalError( "Expect sliced user path to end with a DUS."); } if (has_sliced_operand_paths || has_sliced_user_paths) { matches[instr] = std::make_pair(std::move(sliced_operand_paths), std::move(sliced_user_paths)); } } } } if (matches.empty()) return false; for (auto& [hero, paths] : matches) { auto& [sliced_operand_paths, sliced_user_paths] = paths; std::vector<HloInstruction*> matched_instrs; absl::c_copy(sliced_operand_paths, std::back_inserter(matched_instrs)); std::vector<DataflowPathView> sliced_user_paths_view; for (auto& sliced_user_path : sliced_user_paths) { absl::c_copy(sliced_user_path, std::back_inserter(matched_instrs)); DataflowPathView sliced_user_path_view{&sliced_user_path.front(), sliced_user_path.size()}; sliced_user_paths_view.push_back(std::move(sliced_user_path_view)); } auto captures = GetPatternCaptures(matched_instrs); TF_ASSIGN_OR_RETURN( HloComputation * fusion_body, CreateFusionBody(module, sliced_operand_paths, DataflowPathsView(sliced_user_paths_view), captures)); bool has_dynamic_slices = absl::c_any_of(matched_instrs, [&](auto* instr) { return DynCast<HloDynamicIndexInstruction>(instr) != nullptr; }); TF_ASSIGN_OR_RETURN( HloInstruction * fusion, CreateFusionInstruction(module, hero, captures, fusion_body, has_dynamic_slices)); HloComputation* parent = hero->parent(); if (fusion->shape().IsTuple()) { TF_RETURN_IF_ERROR(parent->ReplaceInstructionWithDifferentShape( const_cast<HloInstruction*>(hero), fusion)); for (auto& sliced_user_path : sliced_user_paths) { auto old_gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front()); HloInstruction* gte = parent->AddInstruction(HloInstruction::CreateGetTupleElement( fusion, old_gte->tuple_index())); TF_RETURN_IF_ERROR( parent->ReplaceInstruction(sliced_user_path.back(), gte)); } } else { auto* instr_to_be_replaced = const_cast<HloInstruction*>(hero); if (sliced_user_paths.empty()) { if (hero->shape().IsTuple()) { if (hero->user_count() != 1 || !DynCast<HloGetTupleElementInstruction>(hero->users().front())) { return absl::InternalError( "Expect a single get-tuple-element user of the original " "tuple-shaped hero op when address computation fusion does " "not return a tuple"); } instr_to_be_replaced = hero->users().front(); } } else { instr_to_be_replaced = sliced_user_paths.front().back(); } TF_RETURN_IF_ERROR( 
parent->ReplaceInstruction(instr_to_be_replaced, fusion)); } } return true; } } }
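A quick numeric check of the alignment predicate above, as a standalone sketch (my own illustration; it assumes kXlaAllocatedBufferAlignBytes is 128, which should be verified against xla/service/gpu/gpu_constants.h): for the f16[2,8,8] inputs used throughout the tests below, slicing [1:2] on the major dimension starts the slice one 8x8 plane (64 f16 elements, 128 bytes) into the buffer, so the stride-times-start product that IsAlignedSlice checks divides evenly by the alignment.

#include <cstdint>
#include <iostream>

int main() {
  // f16[2,8,8] with layout {2,1,0}: byte strides are {128, 16, 2}.
  const int64_t dim0_byte_stride = 8 * 8 * 2;  // one 8x8 plane of 2-byte f16
  const int64_t slice_start = 1;               // slice={[1:2], [0:8], [0:8]}
  const int64_t kAlign = 128;  // assumed value of kXlaAllocatedBufferAlignBytes
  // Prints 0: the slice offset is a multiple of the buffer alignment, so the
  // rewriter may treat the sliced operand as an independent aligned buffer.
  std::cout << (dim0_byte_stride * slice_start) % kAlign << "\n";
  return 0;
}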
#include "xla/service/gpu/dynamic_slice_fusion_rewriter.h" #include <cstddef> #include <cstdint> #include <functional> #include <optional> #include <utility> #include "absl/algorithm/container.h" #include "absl/status/status.h" #include "xla/client/lib/constants.h" #include "xla/client/xla_builder.h" #include "xla/ffi/ffi.h" #include "xla/ffi/ffi_api.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/service/buffer_value.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/hlo_memory_scheduler.h" #include "xla/service/hlo_module_config.h" #include "xla/service/service_executable_run_options.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/gpu/gpu_types.h" #include "xla/stream_executor/stream.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #define PLATFORM "GPU" namespace xla::gpu { class DynamicSliceFusionRewriterTest : public HloTestBase {}; TEST_F(DynamicSliceFusionRewriterTest, SimpleGemm) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) 
custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWorkspaceIgnored) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0 } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} 
get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotRoot) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandHasMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[4,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[2:3], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, 
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %bitcast.41) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[2:3], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[P0]], [[P1]]) ; CHECK-DAG: kind=kCustom, calls=%address-computation, ; CHECK-DAG: backend_config={ ; CHECK-DAG: "kind":"__custom_fusion", ; CHECK-DAG: "custom_fusion_config":{"name":"address_computation"} ; CHECK-DAG: } ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[B0]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsHaveMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.0 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) 
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: %address-computation{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmSlicingNotParameter) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[4,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.12 = f16[2,8,8]{2,1,0} slice(%p0), slice={[0:2], [0:8], [0:8]} %slice.13 = f16[1,8,8]{2,1,0} slice(%slice.12), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[2,8,8]{2,1,0} slice([[P0]]), slice={[0:2], [0:8], [0:8]} ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[S0]], [[P1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotContiguousSlice) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,4,6]{2,1,0} slice(%p0), slice={[1:2], [0:4], [0:6]} %bitcast.41 = f16[4,6]{1,0} 
bitcast(%slice.13) %slice.14 = f16[1,6,4]{2,1,0} slice(%p1), slice={[1:2], [0:6], [0:4]} %bitcast.42 = f16[6,4]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[4,4]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNonNoOpInSliceChain) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %slice.14 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %add.0 = f16[1,8,8]{2,1,0} add(%slice.13, %slice.14) %bitcast.41 = f16[8,8]{1,0} bitcast(%add.0) %slice.15 = f16[1,8,8]{2,1,0} slice(%p1), slice={[0:1], [0:8], [0:8]} %slice.16 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %add.1 = f16[1,8,8]{2,1,0} add(%slice.15, %slice.16) %bitcast.42 = f16[8,8]{1,0} bitcast(%add.1) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmDuplicateOperand) { const char* hlo = R"( HloModule test ENTRY %main { %p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0) %get-tuple-element.240 = f32[100,100]{1,0} get-tuple-element(%p0), index=0 %get-tuple-element.241 = f32[100,100]{1,0} get-tuple-element(%p0), index=1 %concatenate.10 = f32[200,100]{1,0} concatenate(%get-tuple-element.240, %get-tuple-element.241), dimensions={0} %custom-call.16 = (f32[200,100]{1,0}, s8[120000]{0}) custom-call(%concatenate.10, %get-tuple-element.240), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"20000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } %get-tuple-element.97 = f32[200,100]{1,0} get-tuple-element(%custom-call.16), index=0 %slice.26 = f32[100,100]{1,0} slice(%get-tuple-element.97), slice={[0:100], [0:100]} ROOT %custom-call.17 = (f32[100,100]{1,0}, s8[80000]{0}) custom-call(%slice.26, %slice.26), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], 
"rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"10000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } })"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0) ; CHECK: [[S0:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[0:100], [0:100]} ; CHECK-NOT: slice ; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) custom-call([[S0]], [[S0]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(0) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder2) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.14 = f16[1,8,8]{2,1,0} 
slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: }
)";

  auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
  RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
                            expected);
}
absl::StatusOr<bool> DynamicSliceFusionRewriter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  absl::flat_hash_map<HloInstruction*,
                      std::pair<UseDefDataflowPaths, DefUseDataflowPaths>>
      matches;

  for (HloComputation* computation : module->computations()) {
    if (computation->IsFusionComputation()) continue;
    for (HloInstruction* instr : computation->instructions()) {
      if (IsLegacyCublasMatmul(*instr) ||
          (IsCustomCall(instr, platform_name_))) {
        UseDefDataflowPaths sliced_operand_paths =
            GetSlicedOperandPaths(instr);
        bool has_sliced_operand_paths = sliced_operand_paths.size() > 1;
        DefUseDataflowPaths sliced_user_paths = GetSlicedUserPaths(instr);
        bool has_sliced_user_paths =
            absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) {
              return !sliced_user_path.empty();
            });

        if (absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) {
              return DynCast<HloDynamicUpdateSliceInstruction>(
                         sliced_user_path.back()) == nullptr;
            })) {
          return absl::InternalError(
              "Expect sliced user path to end with a DUS.");
        }

        if (has_sliced_operand_paths || has_sliced_user_paths) {
          matches[instr] = std::make_pair(std::move(sliced_operand_paths),
                                          std::move(sliced_user_paths));
        }
      }
    }
  }

  if (matches.empty()) return false;

  for (auto& [hero, paths] : matches) {
    auto& [sliced_operand_paths, sliced_user_paths] = paths;
    std::vector<HloInstruction*> matched_instrs;
    absl::c_copy(sliced_operand_paths, std::back_inserter(matched_instrs));

    std::vector<DataflowPathView> sliced_user_paths_view;
    for (auto& sliced_user_path : sliced_user_paths) {
      absl::c_copy(sliced_user_path, std::back_inserter(matched_instrs));
      DataflowPathView sliced_user_path_view{&sliced_user_path.front(),
                                             sliced_user_path.size()};
      sliced_user_paths_view.push_back(std::move(sliced_user_path_view));
    }

    auto captures = GetPatternCaptures(matched_instrs);

    TF_ASSIGN_OR_RETURN(
        HloComputation * fusion_body,
        CreateFusionBody(module, sliced_operand_paths,
                         DataflowPathsView(sliced_user_paths_view), captures));

    bool has_dynamic_slices = absl::c_any_of(matched_instrs, [&](auto* instr) {
      return DynCast<HloDynamicIndexInstruction>(instr) != nullptr;
    });
    TF_ASSIGN_OR_RETURN(
        HloInstruction * fusion,
        CreateFusionInstruction(module, hero, captures, fusion_body,
                                has_dynamic_slices));

    HloComputation* parent = hero->parent();
    if (fusion->shape().IsTuple()) {
      TF_RETURN_IF_ERROR(parent->ReplaceInstructionWithDifferentShape(
          const_cast<HloInstruction*>(hero), fusion));
      for (auto& sliced_user_path : sliced_user_paths) {
        auto old_gte =
            Cast<HloGetTupleElementInstruction>(sliced_user_path.front());
        HloInstruction* gte =
            parent->AddInstruction(HloInstruction::CreateGetTupleElement(
                fusion, old_gte->tuple_index()));
        TF_RETURN_IF_ERROR(
            parent->ReplaceInstruction(sliced_user_path.back(), gte));
      }
    } else {
      auto* instr_to_be_replaced = const_cast<HloInstruction*>(hero);
      if (sliced_user_paths.empty()) {
        if (hero->shape().IsTuple()) {
          if (hero->user_count() != 1 ||
              !DynCast<HloGetTupleElementInstruction>(
                  hero->users().front())) {
            return absl::InternalError(
                "Expect a single get-tuple-element user of the original "
                "tuple-shaped hero op when address computation fusion does "
                "not return a tuple");
          }
          instr_to_be_replaced = hero->users().front();
        }
      } else {
        instr_to_be_replaced = sliced_user_paths.front().back();
      }
      TF_RETURN_IF_ERROR(
          parent->ReplaceInstruction(instr_to_be_replaced, fusion));
    }
  }

  return true;
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemm) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = 
f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWorkspaceIgnored) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0 } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotRoot) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} 
custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandHasMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[4,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[2:3], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %bitcast.41) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[2:3], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[P0]], [[P1]]) ; CHECK-DAG: kind=kCustom, calls=%address-computation, ; CHECK-DAG: backend_config={ ; 
CHECK-DAG: "kind":"__custom_fusion", ; CHECK-DAG: "custom_fusion_config":{"name":"address_computation"} ; CHECK-DAG: } ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[B0]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsHaveMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.0 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: %address-computation{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmSlicingNotParameter) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[4,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.12 = f16[2,8,8]{2,1,0} slice(%p0), slice={[0:2], [0:8], [0:8]} %slice.13 = 
f16[1,8,8]{2,1,0} slice(%slice.12), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[2,8,8]{2,1,0} slice([[P0]]), slice={[0:2], [0:8], [0:8]} ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[S0]], [[P1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNonNoOpInSliceChain) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %slice.14 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %add.0 = f16[1,8,8]{2,1,0} add(%slice.13, %slice.14) %bitcast.41 = f16[8,8]{1,0} bitcast(%add.0) %slice.15 = f16[1,8,8]{2,1,0} slice(%p1), slice={[0:1], [0:8], [0:8]} %slice.16 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %add.1 = f16[1,8,8]{2,1,0} add(%slice.15, %slice.16) %bitcast.42 = f16[8,8]{1,0} bitcast(%add.1) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmDuplicateOperand) { const char* hlo = R"( HloModule test ENTRY 
%main { %p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0) %get-tuple-element.240 = f32[100,100]{1,0} get-tuple-element(%p0), index=0 %get-tuple-element.241 = f32[100,100]{1,0} get-tuple-element(%p0), index=1 %concatenate.10 = f32[200,100]{1,0} concatenate(%get-tuple-element.240, %get-tuple-element.241), dimensions={0} %custom-call.16 = (f32[200,100]{1,0}, s8[120000]{0}) custom-call(%concatenate.10, %get-tuple-element.240), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"20000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } %get-tuple-element.97 = f32[200,100]{1,0} get-tuple-element(%custom-call.16), index=0 %slice.26 = f32[100,100]{1,0} slice(%get-tuple-element.97), slice={[0:100], [0:100]} ROOT %custom-call.17 = (f32[100,100]{1,0}, s8[80000]{0}) custom-call(%slice.26, %slice.26), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"10000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } })"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0) ; CHECK: [[S0:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[0:100], [0:100]} ; CHECK-NOT: slice ; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) custom-call([[S0]], [[S0]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) fusion ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(0) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = 
f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder2) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %address-computation {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%address-computation, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation"} ; CHECK: } ; CHECK: }
)"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM), expected); }
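// Aside (illustration only, not part of the test file above): FileCheck
// matches plain CHECK lines in order, while a run of consecutive CHECK-DAG
// lines may match in any order. The rewriter tests above therefore use
// CHECK-DAG for the sliced operands, whose relative order inside the fusion
// body is not guaranteed, and plain CHECK for the ROOT custom-call that must
// come after them. A minimal pattern showing the two directives together:
const char* illustrative_pattern = R"(
; CHECK-DAG: [[A:%[^ ]+]] = f16[8,8]{1,0} parameter(0)
; CHECK-DAG: [[B:%[^ ]+]] = f16[8,8]{1,0} parameter(1)
; CHECK:     ROOT {{.*}} custom-call([[A]], [[B]])
)";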
#include "arolla/qtype/dict/dict_types.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/synchronization/mutex.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/qtype/derived_qtype.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/tuple_qtype.h" #include "arolla/util/bytes.h" #include "arolla/util/fast_dynamic_downcast_final.h" #include "arolla/util/indestructible.h" #include "arolla/util/text.h" #include "arolla/util/status_macros_backport.h" namespace arolla { namespace { class KeyToRowDictTypeRegistry { public: static KeyToRowDictTypeRegistry& instance() { static Indestructible<KeyToRowDictTypeRegistry> result; return *result; } absl::Status Register(QTypePtr key_qtype, QTypePtr dict_qtype) { absl::MutexLock l(&lock_); auto [iter, inserted] = dict_types_.emplace(key_qtype, dict_qtype); if (!inserted) { return absl::FailedPreconditionError(absl::StrFormat( "attempt to register %s dict twice", dict_qtype->name())); } return absl::OkStatus(); } absl::StatusOr<QTypePtr> Get(QTypePtr qtype) { absl::ReaderMutexLock l(&lock_); auto iter = dict_types_.find(qtype); if (iter == dict_types_.end()) { return absl::NotFoundError( absl::StrFormat("no dict with %s keys found", qtype->name())); } return iter->second; } private: absl::Mutex lock_; absl::flat_hash_map<QTypePtr, QTypePtr> dict_types_ ABSL_GUARDED_BY(lock_); }; class DictQType final : public BasicDerivedQType { public: DictQType(std::string name, QTypePtr dict_type, QTypePtr values_array_type) : BasicDerivedQType(ConstructorArgs{ .name = std::move(name), .base_qtype = MakeTupleQType({dict_type, values_array_type}), .qtype_specialization_key = "::arolla::DictQType", }) {} }; class DictQTypeRegistry { public: static DictQTypeRegistry& instance() { static Indestructible<DictQTypeRegistry> result; return *result; } absl::StatusOr<QTypePtr> GetQType(QTypePtr key_type, QTypePtr value_type) { { absl::ReaderMutexLock guard(&lock_); if (const auto it = registry_.find({key_type, value_type}); it != registry_.end()) { return it->second.get(); } } ASSIGN_OR_RETURN(QTypePtr dict_type, GetKeyToRowDictQType(key_type)); ASSIGN_OR_RETURN(QTypePtr values_array_type, GetDenseArrayQTypeByValueQType(value_type)); auto kv_dict_type = std::make_unique<DictQType>( absl::StrFormat("Dict<%s,%s>", key_type->name(), value_type->name()), dict_type, values_array_type); absl::MutexLock guard(&lock_); return registry_ .emplace(std::make_pair(key_type, value_type), std::move(kv_dict_type)) .first->second.get(); } private: absl::Mutex lock_; absl::flat_hash_map<std::pair<QTypePtr, QTypePtr>, std::unique_ptr<QType>> registry_ ABSL_GUARDED_BY(lock_); }; } namespace dict_impl { void RegisterKeyToRowDictQType(QTypePtr key_type, QTypePtr dict_type) { auto status = KeyToRowDictTypeRegistry::instance().Register(key_type, dict_type); DCHECK_OK(status); } } absl::StatusOr<QTypePtr> GetKeyToRowDictQType(QTypePtr key_type) { return KeyToRowDictTypeRegistry::instance().Get(key_type); } bool IsKeyToRowDictQType(QTypePtr type) { if (type->value_qtype() == nullptr) { return false; } ASSIGN_OR_RETURN(QTypePtr dict_type, GetKeyToRowDictQType(type->value_qtype()), false); return dict_type == type; } absl::StatusOr<QTypePtr> GetDictQType(QTypePtr key_type, QTypePtr value_type) { 
return DictQTypeRegistry::instance().GetQType(key_type, value_type); } const QType* GetDictKeyQTypeOrNull(QTypePtr dict_type) { auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type); return d != nullptr ? d->type_fields()[0].GetType()->value_qtype() : nullptr; } const QType* GetDictValueQTypeOrNull(QTypePtr dict_type) { auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type); return d != nullptr ? d->type_fields()[1].GetType()->value_qtype() : nullptr; } bool IsDictQType(const QType* qtype) { return fast_dynamic_downcast_final<const DictQType*>(qtype) != nullptr; } template struct QTypeTraits<KeyToRowDict<bool>>; template struct QTypeTraits<KeyToRowDict<int32_t>>; template struct QTypeTraits<KeyToRowDict<int64_t>>; template struct QTypeTraits<KeyToRowDict<Bytes>>; template struct QTypeTraits<KeyToRowDict<Text>>; }
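// Aside: a minimal, self-contained sketch of the locking discipline
// DictQTypeRegistry::GetQType uses above -- a shared (reader) lock for the
// common hit path, released before constructing the new entry, and an
// exclusive lock held only for insertion. emplace() discards the new object
// if another thread raced ahead, so the first-inserted pointer always wins.
// All names below are hypothetical stand-ins, not the arolla API.
#include <map>
#include <memory>
#include <shared_mutex>
#include <string>
#include <utility>

struct FakeType { std::string name; };

class PairRegistry {
 public:
  const FakeType* GetOrCreate(const std::string& key, const std::string& value) {
    {
      std::shared_lock<std::shared_mutex> guard(lock_);
      if (auto it = registry_.find({key, value}); it != registry_.end()) {
        return it->second.get();
      }
    }
    // Build outside the exclusive section to keep the critical region short.
    auto type = std::make_unique<FakeType>(FakeType{"Dict<" + key + "," + value + ">"});
    std::unique_lock<std::shared_mutex> guard(lock_);
    return registry_.emplace(std::make_pair(key, value), std::move(type))
        .first->second.get();
  }

 private:
  std::shared_mutex lock_;
  std::map<std::pair<std::string, std::string>, std::unique_ptr<FakeType>> registry_;
};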
#include "arolla/qtype/dict/dict_types.h" #include <cstdint> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_slot.h" #include "arolla/util/bytes.h" #include "arolla/util/repr.h" #include "arolla/util/testing/status_matchers_backport.h" #include "arolla/util/unit.h" namespace arolla { namespace { using ::arolla::testing::IsOkAndHolds; using ::arolla::testing::StatusIs; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::HasSubstr; using ::testing::Ne; using ::testing::Property; TEST(DictTypes, GetKeyToRowDictQType) { GetKeyToRowDictQType<int64_t>(); EXPECT_THAT(GetKeyToRowDictQType<int64_t>()->value_qtype(), Eq(GetQType<int64_t>())); EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()), IsOkAndHolds(GetQType<KeyToRowDict<int64_t>>())); EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()), IsOkAndHolds(GetKeyToRowDictQType<int64_t>())); EXPECT_THAT(GetKeyToRowDictQType(GetQType<KeyToRowDict<int64_t>>()), StatusIs(absl::StatusCode::kNotFound, HasSubstr("no dict with DICT_INT64 keys found"))); } TEST(DictTypes, GetDictQType) { GetKeyToRowDictQType<int64_t>(); GetDenseArrayQType<float>(); GetDenseArrayQType<double>(); ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict, GetDictQType(GetQType<int64_t>(), GetQType<float>())); EXPECT_THAT(int_to_float_dict->name(), Eq("Dict<INT64,FLOAT32>")); EXPECT_THAT(GetDictKeyQTypeOrNull(int_to_float_dict), Eq(GetQType<int64_t>())); EXPECT_THAT(GetDictValueQTypeOrNull(int_to_float_dict), Eq(GetQType<float>())); EXPECT_THAT( int_to_float_dict->type_fields(), ElementsAre( Property(&TypedSlot::GetType, Eq(GetKeyToRowDictQType<int64_t>())), Property(&TypedSlot::GetType, Eq(GetDenseArrayQType<float>())))); EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<float>()), IsOkAndHolds(Eq(int_to_float_dict))); EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<double>()), IsOkAndHolds(Ne(int_to_float_dict))); } TEST(DictTypes, IsDictQType) { GetKeyToRowDictQType<int64_t>(); GetDenseArrayQType<float>(); GetDenseArrayQType<Unit>(); { ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict, GetDictQType(GetQType<int64_t>(), GetQType<float>())); ASSERT_TRUE(IsDictQType(int_to_float_dict)); } { ASSERT_OK_AND_ASSIGN(QTypePtr int_to_unit_dict, GetDictQType(GetQType<int64_t>(), GetQType<Unit>())); ASSERT_TRUE(IsDictQType(int_to_unit_dict)); } { EXPECT_THAT(GetDictQType(GetQType<Unit>(), GetQType<float>()), StatusIs(absl::StatusCode::kNotFound, HasSubstr("no dict with UNIT keys found"))); } { EXPECT_THAT(GetDictQType(GetQType<float>(), GetQType<float>()), StatusIs(absl::StatusCode::kNotFound, HasSubstr("no dict with FLOAT32 keys found"))); } } TEST(DictTypes, ReprTraits) { EXPECT_EQ(Repr(KeyToRowDict<float>{}), "dict{}"); EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}}}), "dict{0.5:int64{1},}"); EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}, {2.5, 3}}}), "dict{0.5:int64{1},2.5:int64{3},}"); EXPECT_EQ(Repr(KeyToRowDict<Bytes>{{{Bytes("key"), 2}}}), "dict{b'key':int64{2},}"); } } }
absl::StatusOr<QTypePtr> GetKeyToRowDictQType(QTypePtr key_type) {
  return KeyToRowDictTypeRegistry::instance().Get(key_type);
}
TEST(DictTypes, GetKeyToRowDictQType) {
  GetKeyToRowDictQType<int64_t>();
  EXPECT_THAT(GetKeyToRowDictQType<int64_t>()->value_qtype(),
              Eq(GetQType<int64_t>()));
  EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
              IsOkAndHolds(GetQType<KeyToRowDict<int64_t>>()));
  EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
              IsOkAndHolds(GetKeyToRowDictQType<int64_t>()));
  EXPECT_THAT(GetKeyToRowDictQType(GetQType<KeyToRowDict<int64_t>>()),
              StatusIs(absl::StatusCode::kNotFound,
                       HasSubstr("no dict with DICT_INT64 keys found")));
}
#include "xla/service/gpu/runtime/for_all_thunks.h" #include <memory> #include <optional> #include "absl/functional/function_ref.h" #include "xla/service/gpu/runtime/command_buffer_thunk.h" #include "xla/service/gpu/runtime/conditional_thunk.h" #include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/runtime/while_thunk.h" #include "tsl/platform/casts.h" namespace xla::gpu { void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn, const Thunk* thunk) { fn(thunk); switch (thunk->kind()) { case Thunk::kAddressComputation: ForAllThunks(fn, tensorflow::down_cast<const DynamicSliceThunk*>(thunk) ->embedded_thunk()); break; case Thunk::kCommandBuffer: if (const std::unique_ptr<SequentialThunk>& sequence = tensorflow::down_cast<const CommandBufferThunk*>(thunk)->thunks(); sequence != nullptr) { ForAllThunks(fn, sequence.get()); } break; case Thunk::kConditional: for (const std::unique_ptr<SequentialThunk>& branch : tensorflow::down_cast<const ConditionalThunk*>(thunk) ->branch_thunks()) { ForAllThunks(fn, branch.get()); } break; case Thunk::kSequential: ForAllThunks( fn, &tensorflow::down_cast<const SequentialThunk*>(thunk)->thunks()); break; case Thunk::kWhile: ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk) ->condition_thunk_sequence()); ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk) ->body_thunk_sequence()); break; case Thunk::kCholesky: case Thunk::kConvolution: case Thunk::kConvolutionReorder: case Thunk::kCopy: case Thunk::kCopyDone: case Thunk::kCubSort: case Thunk::kCublasLtMatmul: case Thunk::kCustomCall: case Thunk::kCustomKernel: case Thunk::kCuDnn: case Thunk::kFft: case Thunk::kFusedMHA: case Thunk::kGemm: case Thunk::kInfeed: case Thunk::kKernel: case Thunk::kMemset32BitValue: case Thunk::kMemzero: case Thunk::kNcclAllGather: case Thunk::kNcclAllGatherStart: case Thunk::kNcclAllGatherDone: case Thunk::kNcclAllReduce: case Thunk::kNcclAllReduceStart: case Thunk::kNcclAllReduceDone: case Thunk::kNcclCollectiveBroadcast: case Thunk::kNcclCollectiveBroadcastStart: case Thunk::kNcclCollectiveBroadcastDone: case Thunk::kNcclCollectivePermute: case Thunk::kNcclCollectivePermuteStart: case Thunk::kNcclCollectivePermuteDone: case Thunk::kNcclReduceScatter: case Thunk::kNcclReduceScatterStart: case Thunk::kNcclReduceScatterDone: case Thunk::kNcclAllToAll: case Thunk::kNcclAllToAllStart: case Thunk::kNcclAllToAllDone: case Thunk::kNcclSend: case Thunk::kNcclSendDone: case Thunk::kNcclRecv: case Thunk::kNcclRecvDone: case Thunk::kNorm: case Thunk::kOutfeed: case Thunk::kPartitionId: case Thunk::kRecv: case Thunk::kRecvDone: case Thunk::kReplicaId: case Thunk::kSend: case Thunk::kSendDone: case Thunk::kTriangularSolve: case Thunk::kWaitForStreams: break; } } void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn, const ThunkSequence* thunks) { for (const std::unique_ptr<Thunk>& thunk : *thunks) { ForAllThunks(fn, thunk.get()); } } }
#include "xla/service/gpu/runtime/for_all_thunks.h" #include <memory> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/runtime/command_buffer_cmd.h" #include "xla/service/gpu/runtime/command_buffer_thunk.h" #include "xla/service/gpu/runtime/conditional_thunk.h" #include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/runtime/while_thunk.h" namespace xla::gpu { namespace { using ::testing::IsSupersetOf; using ::testing::UnorderedElementsAre; std::vector<const Thunk*> GetAllThunks(Thunk* root) { std::vector<const Thunk*> thunks; ForAllThunks([&](const Thunk* thunk) { thunks.push_back(thunk); }, root); return thunks; } struct DummyThunk : public Thunk { DummyThunk() : Thunk(Thunk::Kind::kGemm, Thunk::ThunkInfo()) {} absl::Status ExecuteOnStream(const ExecuteParams& params) override { return absl::OkStatus(); } }; TEST(ForAllThunksTest, SingleThunk) { DummyThunk thunk; EXPECT_THAT(GetAllThunks(&thunk), UnorderedElementsAre(&thunk)); } TEST(ForAllThunksTest, DynamicSliceThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); auto thunk_sequence = std::make_unique<ThunkSequence>(); thunk_sequence->push_back(std::move(thunk)); DynamicSliceThunk dynamic_slice_thunk( Thunk::ThunkInfo(), std::move(thunk_sequence), {}, {}, {}, {}, {}, {}); EXPECT_THAT(GetAllThunks(&dynamic_slice_thunk), IsSupersetOf<const Thunk*>({thunk_ptr, &dynamic_slice_thunk})); } TEST(ForAllThunksTest, CommandBufferThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); Thunk* sequential_thunk_ptr = sequential_thunk.get(); CommandBufferThunk command_buffer_thunk(CommandBufferCmdSequence(), Thunk::ThunkInfo(), std::move(sequential_thunk)); EXPECT_THAT(GetAllThunks(&command_buffer_thunk), UnorderedElementsAre(thunk_ptr, &command_buffer_thunk, sequential_thunk_ptr)); } TEST(ForAllThunksTest, ConditionalThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); SequentialThunk* sequential_thunk_ptr = sequential_thunk.get(); ConditionalThunkConfig config; config.branch_thunks.push_back(std::move(sequential_thunk)); ConditionalThunk conditional_thunk(Thunk::ThunkInfo(), std::move(config), BufferAllocation::Slice()); EXPECT_THAT(GetAllThunks(&conditional_thunk), UnorderedElementsAre(thunk_ptr, sequential_thunk_ptr, &conditional_thunk)); } TEST(ForAllThunksTest, WhileThunk) { auto condition_thunk = std::make_unique<DummyThunk>(); Thunk* condition_thunk_ptr = condition_thunk.get(); ThunkSequence condition_thunk_sequence; condition_thunk_sequence.push_back(std::move(condition_thunk)); auto body_thunk = std::make_unique<DummyThunk>(); Thunk* body_thunk_ptr = body_thunk.get(); ThunkSequence body_thunk_sequence; body_thunk_sequence.push_back(std::move(body_thunk)); WhileThunk while_thunk( Thunk::ThunkInfo(), BufferAllocation::Slice(), std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(condition_thunk_sequence)), 
std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(body_thunk_sequence))); EXPECT_THAT(GetAllThunks(&while_thunk), IsSupersetOf<const Thunk*>( {condition_thunk_ptr, body_thunk_ptr, &while_thunk})); } } }
void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn,
                  const ThunkSequence* thunks) {
  for (const std::unique_ptr<Thunk>& thunk : *thunks) {
    ForAllThunks(fn, thunk.get());
  }
}
TEST(ForAllThunksTest, DynamicSliceThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); auto thunk_sequence = std::make_unique<ThunkSequence>(); thunk_sequence->push_back(std::move(thunk)); DynamicSliceThunk dynamic_slice_thunk( Thunk::ThunkInfo(), std::move(thunk_sequence), {}, {}, {}, {}, {}, {}); EXPECT_THAT(GetAllThunks(&dynamic_slice_thunk), IsSupersetOf<const Thunk*>({thunk_ptr, &dynamic_slice_thunk})); } TEST(ForAllThunksTest, CommandBufferThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); Thunk* sequential_thunk_ptr = sequential_thunk.get(); CommandBufferThunk command_buffer_thunk(CommandBufferCmdSequence(), Thunk::ThunkInfo(), std::move(sequential_thunk)); EXPECT_THAT(GetAllThunks(&command_buffer_thunk), UnorderedElementsAre(thunk_ptr, &command_buffer_thunk, sequential_thunk_ptr)); } TEST(ForAllThunksTest, ConditionalThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); SequentialThunk* sequential_thunk_ptr = sequential_thunk.get(); ConditionalThunkConfig config; config.branch_thunks.push_back(std::move(sequential_thunk)); ConditionalThunk conditional_thunk(Thunk::ThunkInfo(), std::move(config), BufferAllocation::Slice()); EXPECT_THAT(GetAllThunks(&conditional_thunk), UnorderedElementsAre(thunk_ptr, sequential_thunk_ptr, &conditional_thunk)); } TEST(ForAllThunksTest, WhileThunk) { auto condition_thunk = std::make_unique<DummyThunk>(); Thunk* condition_thunk_ptr = condition_thunk.get(); ThunkSequence condition_thunk_sequence; condition_thunk_sequence.push_back(std::move(condition_thunk)); auto body_thunk = std::make_unique<DummyThunk>(); Thunk* body_thunk_ptr = body_thunk.get(); ThunkSequence body_thunk_sequence; body_thunk_sequence.push_back(std::move(body_thunk)); WhileThunk while_thunk( Thunk::ThunkInfo(), BufferAllocation::Slice(), std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(condition_thunk_sequence)), std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(body_thunk_sequence))); EXPECT_THAT(GetAllThunks(&while_thunk), IsSupersetOf<const Thunk*>( {condition_thunk_ptr, body_thunk_ptr, &while_thunk})); }
#include "tensorflow/core/lib/wav/wav_io.h" #include <math.h> #include <string.h> #include <algorithm> #include "absl/base/casts.h" #include "tensorflow/core/lib/core/coding.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/byte_order.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" namespace tensorflow { namespace wav { namespace { struct TF_PACKED RiffChunk { char chunk_id[4]; char chunk_data_size[4]; char riff_type[4]; }; static_assert(sizeof(RiffChunk) == 12, "TF_PACKED does not work."); struct TF_PACKED FormatChunk { char chunk_id[4]; char chunk_data_size[4]; char compression_code[2]; char channel_numbers[2]; char sample_rate[4]; char bytes_per_second[4]; char bytes_per_frame[2]; char bits_per_sample[2]; }; static_assert(sizeof(FormatChunk) == 24, "TF_PACKED does not work."); struct TF_PACKED DataChunk { char chunk_id[4]; char chunk_data_size[4]; }; static_assert(sizeof(DataChunk) == 8, "TF_PACKED does not work."); struct TF_PACKED WavHeader { RiffChunk riff_chunk; FormatChunk format_chunk; DataChunk data_chunk; }; static_assert(sizeof(WavHeader) == sizeof(RiffChunk) + sizeof(FormatChunk) + sizeof(DataChunk), "TF_PACKED does not work."); constexpr char kRiffChunkId[] = "RIFF"; constexpr char kRiffType[] = "WAVE"; constexpr char kFormatChunkId[] = "fmt "; constexpr char kDataChunkId[] = "data"; inline int16 FloatToInt16Sample(float data) { constexpr float kMultiplier = 1.0f * (1 << 15); return std::min<float>(std::max<float>(roundf(data * kMultiplier), kint16min), kint16max); } inline float Int16SampleToFloat(int16_t data) { constexpr float kMultiplier = 1.0f / (1 << 15); return data * kMultiplier; } } Status IncrementOffset(int old_offset, int64_t increment, size_t max_size, int* new_offset) { if (old_offset < 0) { return errors::InvalidArgument("Negative offsets are not allowed: ", old_offset); } if (increment < 0) { return errors::InvalidArgument("Negative increment is not allowed: ", increment); } if (old_offset > max_size) { return errors::InvalidArgument("Initial offset is outside data range: ", old_offset); } int64_t sum = old_offset + increment; if (sum > max_size) { return errors::InvalidArgument("Data too short when trying to read string"); } if (sum < 0) { return errors::InvalidArgument("Offset too large, overflowed: ", sum); } *new_offset = sum; return absl::OkStatus(); } Status ExpectText(const std::string& data, const std::string& expected_text, int* offset) { int new_offset; TF_RETURN_IF_ERROR( IncrementOffset(*offset, expected_text.size(), data.size(), &new_offset)); const std::string found_text(data.begin() + *offset, data.begin() + new_offset); if (found_text != expected_text) { return errors::InvalidArgument("Header mismatch: Expected ", expected_text, " but found ", found_text); } *offset = new_offset; return absl::OkStatus(); } Status ReadString(const std::string& data, int expected_length, std::string* value, int* offset) { int new_offset; TF_RETURN_IF_ERROR( IncrementOffset(*offset, expected_length, data.size(), &new_offset)); *value = std::string(data.begin() + *offset, data.begin() + new_offset); *offset = new_offset; return absl::OkStatus(); } template <typename T> Status EncodeAudioAsS16LEWav(const float* audio, size_t sample_rate, size_t num_channels, size_t num_frames, T* wav_string) { constexpr size_t kFormatChunkSize = 16; constexpr size_t kCompressionCodePcm = 1; constexpr size_t kBitsPerSample = 16; constexpr size_t kBytesPerSample = kBitsPerSample / 8; constexpr size_t 
kHeaderSize = sizeof(WavHeader); if (audio == nullptr && num_frames > 0) { return errors::InvalidArgument("audio is null"); } if (wav_string == nullptr) { return errors::InvalidArgument("wav_string is null"); } if (sample_rate == 0 || sample_rate > kuint32max) { return errors::InvalidArgument("sample_rate must be in (0, 2^32), got: ", sample_rate); } if (num_channels == 0 || num_channels > kuint16max) { return errors::InvalidArgument("num_channels must be in (0, 2^16), got: ", num_channels); } const size_t bytes_per_second = sample_rate * kBytesPerSample * num_channels; const size_t num_samples = num_frames * num_channels; const size_t data_size = num_samples * kBytesPerSample; const size_t file_size = kHeaderSize + num_samples * kBytesPerSample; const size_t bytes_per_frame = kBytesPerSample * num_channels; if (file_size > kuint32max) { return errors::InvalidArgument( "Provided channels and frames cannot be encoded as a WAV."); } wav_string->resize(file_size); char* data = &(*wav_string)[0]; WavHeader* header = absl::bit_cast<WavHeader*>(data); auto* riff_chunk = &header->riff_chunk; memcpy(riff_chunk->chunk_id, kRiffChunkId, 4); core::EncodeFixed32(riff_chunk->chunk_data_size, file_size - 8); memcpy(riff_chunk->riff_type, kRiffType, 4); auto* format_chunk = &header->format_chunk; memcpy(format_chunk->chunk_id, kFormatChunkId, 4); core::EncodeFixed32(format_chunk->chunk_data_size, kFormatChunkSize); core::EncodeFixed16(format_chunk->compression_code, kCompressionCodePcm); core::EncodeFixed16(format_chunk->channel_numbers, num_channels); core::EncodeFixed32(format_chunk->sample_rate, sample_rate); core::EncodeFixed32(format_chunk->bytes_per_second, bytes_per_second); core::EncodeFixed16(format_chunk->bytes_per_frame, bytes_per_frame); core::EncodeFixed16(format_chunk->bits_per_sample, kBitsPerSample); auto* data_chunk = &header->data_chunk; memcpy(data_chunk->chunk_id, kDataChunkId, 4); core::EncodeFixed32(data_chunk->chunk_data_size, data_size); data += kHeaderSize; for (size_t i = 0; i < num_samples; ++i) { int16_t sample = FloatToInt16Sample(audio[i]); core::EncodeFixed16(&data[i * kBytesPerSample], static_cast<uint16>(sample)); } return absl::OkStatus(); } template Status EncodeAudioAsS16LEWav<std::string>(const float* audio, size_t sample_rate, size_t num_channels, size_t num_frames, std::string* wav_string); template Status EncodeAudioAsS16LEWav<tstring>(const float* audio, size_t sample_rate, size_t num_channels, size_t num_frames, tstring* wav_string); Status DecodeLin16WaveAsFloatVector(const std::string& wav_string, std::vector<float>* float_values, uint32* sample_count, uint16* channel_count, uint32* sample_rate) { int offset = 0; TF_RETURN_IF_ERROR(ExpectText(wav_string, kRiffChunkId, &offset)); uint32 total_file_size; TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &total_file_size, &offset)); TF_RETURN_IF_ERROR(ExpectText(wav_string, kRiffType, &offset)); std::string found_text; TF_RETURN_IF_ERROR(ReadString(wav_string, 4, &found_text, &offset)); while (found_text != kFormatChunkId) { if (found_text != "JUNK" && found_text != "bext" && found_text != "iXML" && found_text != "qlty" && found_text != "mext" && found_text != "levl" && found_text != "link" && found_text != "axml") { return errors::InvalidArgument("Unexpected field ", found_text); } uint32 size_of_chunk; TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &size_of_chunk, &offset)); TF_RETURN_IF_ERROR( IncrementOffset(offset, size_of_chunk, wav_string.size(), &offset)); TF_RETURN_IF_ERROR(ReadString(wav_string, 4, 
&found_text, &offset)); } uint32 format_chunk_size; TF_RETURN_IF_ERROR( ReadValue<uint32>(wav_string, &format_chunk_size, &offset)); if ((format_chunk_size != 16) && (format_chunk_size != 18)) { return errors::InvalidArgument( "Bad format chunk size for WAV: Expected 16 or 18, but got", format_chunk_size); } uint16 audio_format; TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &audio_format, &offset)); if (audio_format != 1) { return errors::InvalidArgument( "Bad audio format for WAV: Expected 1 (PCM), but got", audio_format); } TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, channel_count, &offset)); if (*channel_count < 1) { return errors::InvalidArgument( "Bad number of channels for WAV: Expected at least 1, but got ", *channel_count); } TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, sample_rate, &offset)); uint32 bytes_per_second; TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &bytes_per_second, &offset)); uint16 bytes_per_sample; TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &bytes_per_sample, &offset)); uint16 bits_per_sample; TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &bits_per_sample, &offset)); if (bits_per_sample != 16) { return errors::InvalidArgument( "Can only read 16-bit WAV files, but received ", bits_per_sample); } const uint32 expected_bytes_per_sample = ((bits_per_sample * *channel_count) + 7) / 8; if (bytes_per_sample != expected_bytes_per_sample) { return errors::InvalidArgument( "Bad bytes per sample in WAV header: Expected ", expected_bytes_per_sample, " but got ", bytes_per_sample); } const uint64 expected_bytes_per_second = static_cast<uint64>(bytes_per_sample) * *sample_rate; if (static_cast<uint64>(bytes_per_second) != expected_bytes_per_second) { return errors::InvalidArgument( "Bad bytes per second in WAV header: Expected ", expected_bytes_per_second, " but got ", bytes_per_second, " (sample_rate=", *sample_rate, ", bytes_per_sample=", bytes_per_sample, ")"); } if (format_chunk_size == 18) { offset += 2; } bool was_data_found = false; while (offset < wav_string.size()) { std::string chunk_id; TF_RETURN_IF_ERROR(ReadString(wav_string, 4, &chunk_id, &offset)); uint32 chunk_size; TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &chunk_size, &offset)); if (chunk_size > std::numeric_limits<int32>::max()) { return errors::InvalidArgument( "WAV data chunk '", chunk_id, "' is too large: ", chunk_size, " bytes, but the limit is ", std::numeric_limits<int32>::max()); } if (chunk_id == kDataChunkId) { if (was_data_found) { return errors::InvalidArgument("More than one data chunk found in WAV"); } was_data_found = true; *sample_count = chunk_size / bytes_per_sample; const uint32 data_count = *sample_count * *channel_count; int unused_new_offset = 0; TF_RETURN_IF_ERROR(IncrementOffset(offset, sizeof(int16) * data_count, wav_string.size(), &unused_new_offset)); float_values->resize(data_count); for (int i = 0; i < data_count; ++i) { int16_t single_channel_value = 0; TF_RETURN_IF_ERROR( ReadValue<int16>(wav_string, &single_channel_value, &offset)); (*float_values)[i] = Int16SampleToFloat(single_channel_value); } } else { offset += chunk_size; } } if (!was_data_found) { return errors::InvalidArgument("No data chunk found in WAV"); } return absl::OkStatus(); } } }
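// Aside: a standalone illustration of the 16-bit PCM scaling implemented by
// FloatToInt16Sample / Int16SampleToFloat above. Floats are scaled by 2^15
// and clamped to [-32768, 32767]; decoding divides by the same factor, so
// an encode/decode round trip is exact to within one quantization step
// (1/32768, about 3.05e-5) -- which is why the EncodeThenDecode test below
// compares with EXPECT_NEAR(..., 1e-4f).
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

int16_t FloatToInt16(float f) {
  const float k = 32768.0f;  // 1 << 15
  return static_cast<int16_t>(
      std::min(std::max(std::round(f * k), -32768.0f), 32767.0f));
}

float Int16ToFloat(int16_t s) { return s / 32768.0f; }

int main() {
  for (float f : {0.0f, 0.1f, -1.0f, 0.999f}) {
    assert(std::fabs(Int16ToFloat(FloatToInt16(f)) - f) <= 1.0f / 32768.0f);
  }
}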
#include "tensorflow/core/lib/wav/wav_io.h" #include <string> #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/error_codes.pb.h" namespace tensorflow { namespace wav { Status ExpectText(const string& data, const string& expected_text, int* offset); Status ReadString(const string& data, int expected_length, string* value, int* offset); TEST(WavIO, BadArguments) { float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f}; tstring result; EXPECT_EQ(error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(nullptr, 44100, 2, 3, &result).code()); TF_EXPECT_OK(EncodeAudioAsS16LEWav(nullptr, 44100, 2, 0, &result)); EXPECT_EQ( error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, 44100, 2, 3, (tstring*)nullptr).code()); const size_t kuint32max_plus_one = static_cast<size_t>(kuint32max) + 1; const size_t kuint16max_plus_one = static_cast<size_t>(kuint16max) + 1; EXPECT_EQ(error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, 0, 2, 3, &result).code()); EXPECT_EQ(error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, 44100, 0, 3, &result).code()); EXPECT_EQ( error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, kuint32max_plus_one, 2, 3, &result).code()); EXPECT_EQ(error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, 44100, kuint16max_plus_one, 3, &result) .code()); EXPECT_EQ(error::INVALID_ARGUMENT, EncodeAudioAsS16LEWav(audio, 44100, 2, 1073741813, &result).code()); } TEST(WavIO, BasicEven) { float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f}; string result; TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 3, &result)); EXPECT_EQ(56, result.size()); TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 22050, 1, 6, &result)); EXPECT_EQ(56, result.size()); TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 8000, 1, 6, &result)); EXPECT_EQ(56, result.size()); } TEST(WavIO, BasicOdd) { float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f}; string result; TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 22050, 1, 5, &result)); EXPECT_EQ(54, result.size()); } TEST(WavIO, EncodeThenDecode) { float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f}; string wav_data; TF_ASSERT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 3, &wav_data)); std::vector<float> decoded_audio; uint32 decoded_sample_count; uint16 decoded_channel_count; uint32 decoded_sample_rate; TF_ASSERT_OK(DecodeLin16WaveAsFloatVector( wav_data, &decoded_audio, &decoded_sample_count, &decoded_channel_count, &decoded_sample_rate)); EXPECT_EQ(2, decoded_channel_count); EXPECT_EQ(3, decoded_sample_count); EXPECT_EQ(44100, decoded_sample_rate); for (int i = 0; i < 6; ++i) { EXPECT_NEAR(audio[i], decoded_audio[i], 1e-4f) << "i=" << i; } } TEST(WavIO, BasicMono) { std::vector<uint8> wav_data = { 'R', 'I', 'F', 'F', 44, 0, 0, 0, 'W', 'A', 'V', 'E', 'f', 'm', 't', ' ', 16, 0, 0, 0, 1, 0, 1, 0, 0x44, 0xac, 0, 0, 0x88, 0x58, 0x1, 0, 2, 0, 16, 0, 'd', 'a', 't', 'a', 8, 0, 0, 0, 0, 0, 0xff, 0x7f, 0, 0, 0x00, 0x80, }; string expected(wav_data.begin(), wav_data.end()); float audio[] = {0.0f, 1.0f, 0.0f, -1.0f}; string result; TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 1, 4, &result)); EXPECT_EQ(expected, result); } TEST(WavIO, BasicStereo) { std::vector<uint8> wav_data = { 'R', 'I', 'F', 'F', 44, 0, 0, 0, 'W', 'A', 'V', 'E', 'f', 'm', 't', ' ', 16, 0, 0, 0, 1, 0, 2, 0, 0x44, 0xac, 0, 0, 0x10, 0xb1, 0x2, 0, 4, 0, 16, 0, 'd', 'a', 't', 'a', 8, 0, 0, 0, 0, 0, 0xff, 0x7f, 0, 0, 0x00, 0x80, }; string expected(wav_data.begin(), 
wav_data.end()); float audio[] = {0.0f, 1.0f, 0.0f, -1.0f}; string result; TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 2, &result)); EXPECT_EQ(expected, result); } TEST(WavIO, ChunkSizeOverflow) { std::vector<uint8> wav_data = { 'R', 'I', 'F', 'F', 60, 0, 0, 0, 'W', 'A', 'V', 'E', 'f', 'm', 't', ' ', 16, 0, 0, 0, 1, 0, 1, 0, 0x44, 0xac, 0, 0, 0x88, 0x58, 0x1, 0, 2, 0, 16, 0, 'd', 'a', 't', 'a', 8, 0, 0, 0, 0, 0, 0xff, 0x7f, 0, 0, 0x00, 0x80, 'f', 'o', 'o', 'o', 0xff, 0xff, 0xff, 0xf8, 0, 0, 0xff, 0x7f, 0, 0, 0x00, 0x80, }; string wav_data_string(wav_data.begin(), wav_data.end()); std::vector<float> decoded_audio; uint32 decoded_sample_count; uint16 decoded_channel_count; uint32 decoded_sample_rate; Status decode_status = DecodeLin16WaveAsFloatVector( wav_data_string, &decoded_audio, &decoded_sample_count, &decoded_channel_count, &decoded_sample_rate); EXPECT_FALSE(decode_status.ok()); EXPECT_TRUE(absl::StrContains(decode_status.message(), "too large")) << decode_status.message(); } TEST(WavIO, IncrementOffset) { int new_offset = -1; TF_EXPECT_OK(IncrementOffset(0, 10, 20, &new_offset)); EXPECT_EQ(10, new_offset); new_offset = -1; TF_EXPECT_OK(IncrementOffset(10, 4, 20, &new_offset)); EXPECT_EQ(14, new_offset); new_offset = -1; TF_EXPECT_OK(IncrementOffset(99, 1, 100, &new_offset)); EXPECT_EQ(100, new_offset); new_offset = -1; EXPECT_FALSE(IncrementOffset(-1, 1, 100, &new_offset).ok()); new_offset = -1; EXPECT_FALSE(IncrementOffset(0, -1, 100, &new_offset).ok()); new_offset = -1; EXPECT_FALSE(IncrementOffset(std::numeric_limits<int>::max(), 1, std::numeric_limits<int>::max(), &new_offset) .ok()); new_offset = -1; EXPECT_FALSE(IncrementOffset(101, 1, 100, &new_offset).ok()); } TEST(WavIO, ExpectText) { std::vector<uint8> test_data = { 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', }; string test_string(test_data.begin(), test_data.end()); int offset = 0; TF_EXPECT_OK(ExpectText(test_string, "Expected", &offset)); EXPECT_EQ(8, offset); offset = 0; Status expect_status = ExpectText(test_string, "Unexpected", &offset); EXPECT_FALSE(expect_status.ok()); offset = 0; TF_EXPECT_OK(ExpectText(test_string, "Exp", &offset)); EXPECT_EQ(3, offset); TF_EXPECT_OK(ExpectText(test_string, "ected", &offset)); EXPECT_EQ(8, offset); expect_status = ExpectText(test_string, "foo", &offset); EXPECT_FALSE(expect_status.ok()); } TEST(WavIO, ReadString) { std::vector<uint8> test_data = { 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', }; string test_string(test_data.begin(), test_data.end()); int offset = 0; string read_value; TF_EXPECT_OK(ReadString(test_string, 2, &read_value, &offset)); EXPECT_EQ("Ex", read_value); EXPECT_EQ(2, offset); TF_EXPECT_OK(ReadString(test_string, 6, &read_value, &offset)); EXPECT_EQ("pected", read_value); EXPECT_EQ(8, offset); Status read_status = ReadString(test_string, 3, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueInt8) { std::vector<uint8> test_data = {0x00, 0x05, 0xff, 0x80}; string test_string(test_data.begin(), test_data.end()); int offset = 0; int8_t read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(1, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(5, read_value); EXPECT_EQ(2, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(-1, read_value); EXPECT_EQ(3, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(-128, read_value); EXPECT_EQ(4, offset); Status read_status = ReadValue(test_string, &read_value, 
&offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueUInt8) { std::vector<uint8> test_data = {0x00, 0x05, 0xff, 0x80}; string test_string(test_data.begin(), test_data.end()); int offset = 0; uint8 read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(1, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(5, read_value); EXPECT_EQ(2, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(255, read_value); EXPECT_EQ(3, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(128, read_value); EXPECT_EQ(4, offset); Status read_status = ReadValue(test_string, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueInt16) { std::vector<uint8> test_data = { 0x00, 0x00, 0xff, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x80, }; string test_string(test_data.begin(), test_data.end()); int offset = 0; int16_t read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(2, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(255, read_value); EXPECT_EQ(4, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(256, read_value); EXPECT_EQ(6, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(-1, read_value); EXPECT_EQ(8, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(-32768, read_value); EXPECT_EQ(10, offset); Status read_status = ReadValue(test_string, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueUInt16) { std::vector<uint8> test_data = { 0x00, 0x00, 0xff, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x80, }; string test_string(test_data.begin(), test_data.end()); int offset = 0; uint16 read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(2, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(255, read_value); EXPECT_EQ(4, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(256, read_value); EXPECT_EQ(6, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(65535, read_value); EXPECT_EQ(8, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(32768, read_value); EXPECT_EQ(10, offset); Status read_status = ReadValue(test_string, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueInt32) { std::vector<uint8> test_data = { 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0xff, 0xff, 0xff, }; string test_string(test_data.begin(), test_data.end()); int offset = 0; int32_t read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(4, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(255, read_value); EXPECT_EQ(8, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(65280, read_value); EXPECT_EQ(12, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(16711680, read_value); EXPECT_EQ(16, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(-1, read_value); EXPECT_EQ(20, offset); Status read_status = ReadValue(test_string, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } TEST(WavIO, ReadValueUInt32) { std::vector<uint8> test_data = { 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 
0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0xff, 0xff, 0xff, }; string test_string(test_data.begin(), test_data.end()); int offset = 0; uint32 read_value; TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(0, read_value); EXPECT_EQ(4, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(255, read_value); EXPECT_EQ(8, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(65280, read_value); EXPECT_EQ(12, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(16711680, read_value); EXPECT_EQ(16, offset); TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset)); EXPECT_EQ(4294967295, read_value); EXPECT_EQ(20, offset); Status read_status = ReadValue(test_string, &read_value, &offset); EXPECT_FALSE(read_status.ok()); } } }
Status ReadString(const std::string& data, int expected_length,
                  std::string* value, int* offset) {
  int new_offset;
  TF_RETURN_IF_ERROR(
      IncrementOffset(*offset, expected_length, data.size(), &new_offset));
  *value = std::string(data.begin() + *offset, data.begin() + new_offset);
  *offset = new_offset;
  return absl::OkStatus();
}
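// Aside: a self-contained sketch of the cursor-style parsing contract the
// helpers above follow -- validate the read against the buffer size first
// (IncrementOffset additionally rejects negative offsets and overflow), and
// only advance *offset on success, so a failed read leaves the cursor where
// it was. Names below are stand-ins, not the TensorFlow API.
#include <cassert>
#include <string>

bool ReadStringSketch(const std::string& data, int expected_length,
                      std::string* value, int* offset) {
  long long end = static_cast<long long>(*offset) + expected_length;
  if (*offset < 0 || expected_length < 0 ||
      end > static_cast<long long>(data.size())) {
    return false;  // cursor untouched on failure
  }
  *value = data.substr(*offset, expected_length);
  *offset = static_cast<int>(end);
  return true;
}

int main() {
  std::string buf = "RIFFWAVE";
  int offset = 0;
  std::string tag;
  assert(ReadStringSketch(buf, 4, &tag, &offset) && tag == "RIFF" && offset == 4);
  assert(ReadStringSketch(buf, 4, &tag, &offset) && tag == "WAVE" && offset == 8);
  assert(!ReadStringSketch(buf, 1, &tag, &offset) && offset == 8);  // past end
}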
TEST(WavIO, ReadString) {
  std::vector<uint8> test_data = {
      'E', 'x', 'p', 'e', 'c', 't', 'e', 'd',
  };
  string test_string(test_data.begin(), test_data.end());
  int offset = 0;
  string read_value;
  TF_EXPECT_OK(ReadString(test_string, 2, &read_value, &offset));
  EXPECT_EQ("Ex", read_value);
  EXPECT_EQ(2, offset);
  TF_EXPECT_OK(ReadString(test_string, 6, &read_value, &offset));
  EXPECT_EQ("pected", read_value);
  EXPECT_EQ(8, offset);
  Status read_status = ReadString(test_string, 3, &read_value, &offset);
  EXPECT_FALSE(read_status.ok());
}
#include "tensorflow/core/data/service/dispatcher_state.h" #include <algorithm> #include <memory> #include <optional> #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/data/service/common.h" #include "tensorflow/core/data/service/journal.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/service_config.pb.h" namespace tensorflow { namespace data { DispatcherState::DispatcherState() : worker_index_resolver_(std::vector<std::string>{}) {} DispatcherState::DispatcherState( const experimental::DispatcherConfig& dispatcher_config) : worker_index_resolver_(dispatcher_config.worker_addresses()) {} Status DispatcherState::Apply(const Update& update) { switch (update.update_type_case()) { case Update::kRegisterDataset: RegisterDataset(update.register_dataset()); break; case Update::kRegisterWorker: RegisterWorker(update.register_worker()); break; case Update::kCreateJob: CreateJob(update.create_job()); break; case Update::kCreateIteration: CreateIteration(update.create_iteration()); break; case Update::kProduceSplit: ProduceSplit(update.produce_split()); break; case Update::kAcquireIterationClient: AcquireIterationClient(update.acquire_iteration_client()); break; case Update::kReleaseIterationClient: ReleaseIterationClient(update.release_iteration_client()); break; case Update::kGarbageCollectIteration: GarbageCollectIteration(update.garbage_collect_iteration()); break; case Update::kRemoveTask: RemoveTask(update.remove_task()); break; case Update::kCreatePendingTask: CreatePendingTask(update.create_pending_task()); break; case Update::kClientHeartbeat: ClientHeartbeat(update.client_heartbeat()); break; case Update::kCreateTask: CreateTask(update.create_task()); break; case Update::kFinishTask: FinishTask(update.finish_task()); break; case Update::kSnapshot: Snapshot(update.snapshot()); break; case Update::kCompressionDisabledAtRuntime: CompressionDisabledAtRuntime(update.compression_disabled_at_runtime()); break; case Update::UPDATE_TYPE_NOT_SET: return errors::Internal("Update type not set."); } return absl::OkStatus(); } void DispatcherState::RegisterDataset( const RegisterDatasetUpdate& register_dataset) { std::string dataset_id = register_dataset.dataset_id(); auto dataset = std::make_shared<Dataset>(dataset_id, register_dataset.metadata()); DCHECK(!datasets_by_id_.contains(dataset_id)); datasets_by_id_[dataset_id] = dataset; UpdateNextAvailableDatasetId(); } void DispatcherState::RegisterWorker( const RegisterWorkerUpdate& register_worker) { std::string address = register_worker.worker_address(); DCHECK(!workers_.contains(address)); workers_[address] = std::make_shared<Worker>(register_worker); tasks_by_worker_[address] = absl::flat_hash_map<int64_t, std::shared_ptr<Task>>(); worker_index_resolver_.AddWorker(address); } void DispatcherState::CreateJob(const CreateJobUpdate& create_job) { int64_t job_id = create_job.job_id(); std::string job_name = create_job.job_name(); std::optional<int64_t> num_consumers; if (create_job.optional_num_consumers_case() == CreateJobUpdate::kNumConsumers) { num_consumers = create_job.num_consumers(); } auto job = std::make_shared<Job>( job_id, create_job.dataset_id(), create_job.processing_mode_def(), job_name, num_consumers, 
create_job.use_cross_trainer_cache(), create_job.target_workers()); DCHECK(!jobs_by_id_.contains(job_id)); jobs_by_id_[job_id] = job; DCHECK(!jobs_by_name_.contains(job_name)); jobs_by_name_[job_name] = job; next_available_job_id_ = std::max(next_available_job_id_, job_id + 1); } Status DispatcherState::JobFromId(int64_t job_id, std::shared_ptr<const Job>& job) const { auto it = jobs_by_id_.find(job_id); if (it == jobs_by_id_.end()) { return errors::NotFound("Job with id ", job_id, " not found"); } job = it->second; return absl::OkStatus(); } Status DispatcherState::JobByName(const std::string& job_name, std::shared_ptr<const Job>& job) const { auto it = jobs_by_name_.find(job_name); if (it == jobs_by_name_.end()) { return errors::NotFound("Job with name ", job_name, " not found"); } job = it->second; return absl::OkStatus(); } void DispatcherState::CreateIteration( const CreateIterationUpdate& create_iteration) { int64_t iteration_id = create_iteration.iteration_id(); int64_t job_id = create_iteration.job_id(); DCHECK(jobs_by_id_.contains(job_id)); auto& job = jobs_by_id_[job_id]; DCHECK(job); IterationKey iteration_key(job->job_name, create_iteration.repetition()); auto iteration = std::make_shared<Iteration>( iteration_id, iteration_key, create_iteration.num_split_providers(), job); DCHECK(!iterations_.contains(iteration_id)); iterations_[iteration_id] = iteration; tasks_by_iteration_[iteration_id] = std::vector<std::shared_ptr<Task>>(); DCHECK(!iterations_by_key_.contains(iteration_key) || iterations_by_key_[iteration_key]->garbage_collected); iterations_by_key_[iteration_key] = iteration; next_available_iteration_id_ = std::max(next_available_iteration_id_, iteration_id + 1); } void DispatcherState::ProduceSplit(const ProduceSplitUpdate& produce_split) { std::shared_ptr<Iteration> iteration = iterations_[produce_split.iteration_id()]; DCHECK(iteration->distributed_epoch_state.has_value()); DistributedEpochState& state = iteration->distributed_epoch_state.value(); int64_t provider_index = produce_split.split_provider_index(); DCHECK_GE(produce_split.repetition(), state.repetitions[provider_index]); state.repetitions[provider_index] = produce_split.repetition(); if (produce_split.finished()) { state.repetitions[provider_index]++; state.indices[provider_index] = 0; return; } state.indices[provider_index]++; } void DispatcherState::AcquireIterationClient( const AcquireIterationClientUpdate& acquire_iteration_client) { int64_t iteration_client_id = acquire_iteration_client.iteration_client_id(); std::shared_ptr<Iteration>& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(!iteration); iteration = iterations_[acquire_iteration_client.iteration_id()]; DCHECK(iteration); iteration->num_clients++; next_available_iteration_client_id_ = std::max(next_available_iteration_client_id_, iteration_client_id + 1); } void DispatcherState::ReleaseIterationClient( const ReleaseIterationClientUpdate& release_iteration_client) { int64_t iteration_client_id = release_iteration_client.iteration_client_id(); std::shared_ptr<Iteration>& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(iteration); iteration->num_clients--; DCHECK_GE(iteration->num_clients, 0); iteration->last_client_released_micros = release_iteration_client.time_micros(); iterations_for_client_ids_.erase(iteration_client_id); } void DispatcherState::GarbageCollectIteration( const GarbageCollectIterationUpdate& garbage_collect_iteration) { int64_t iteration_id = garbage_collect_iteration.iteration_id(); for 
(auto& task : tasks_by_iteration_[iteration_id]) { task->finished = true; tasks_by_worker_[task->worker_address].erase(task->task_id); } iterations_[iteration_id]->finished = true; iterations_[iteration_id]->garbage_collected = true; } void DispatcherState::RemoveTask(const RemoveTaskUpdate& remove_task) { std::shared_ptr<Task>& task = tasks_[remove_task.task_id()]; DCHECK(task); task->removed = true; auto& tasks_for_iteration = tasks_by_iteration_[task->iteration->iteration_id]; for (auto it = tasks_for_iteration.begin(); it != tasks_for_iteration.end(); ++it) { if ((*it)->task_id == task->task_id) { tasks_for_iteration.erase(it); break; } } tasks_by_worker_[task->worker_address].erase(task->task_id); tasks_.erase(task->task_id); VLOG(1) << "Removed task " << remove_task.task_id() << " from worker " << task->worker_address; } void DispatcherState::CreatePendingTask( const CreatePendingTaskUpdate& create_pending_task) { int64_t task_id = create_pending_task.task_id(); auto& task = tasks_[task_id]; DCHECK_EQ(task, nullptr); auto& iteration = iterations_[create_pending_task.iteration_id()]; DCHECK_NE(iteration, nullptr); task = std::make_shared<Task>(create_pending_task, iteration); iteration->pending_tasks.emplace(task, create_pending_task.starting_round()); tasks_by_worker_[create_pending_task.worker_address()][task->task_id] = task; next_available_task_id_ = std::max(next_available_task_id_, task_id + 1); } void DispatcherState::ClientHeartbeat( const ClientHeartbeatUpdate& client_heartbeat) { int64_t iteration_client_id = client_heartbeat.iteration_client_id(); auto& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(!iteration->pending_tasks.empty()); auto& task = iteration->pending_tasks.front(); if (client_heartbeat.has_task_rejected()) { task.failures++; task.ready_consumers.clear(); task.target_round = client_heartbeat.task_rejected().new_target_round(); } if (client_heartbeat.task_accepted()) { task.ready_consumers.insert(iteration_client_id); if (task.ready_consumers.size() == iteration->job->num_consumers.value()) { VLOG(1) << "Promoting task " << task.task->task_id << " from pending to active"; task.task->starting_round = task.target_round; tasks_by_iteration_[iteration->iteration_id].push_back(task.task); iteration->pending_tasks.pop(); } } } void DispatcherState::CreateTask(const CreateTaskUpdate& create_task) { int64_t task_id = create_task.task_id(); auto& task = tasks_[task_id]; DCHECK_EQ(task, nullptr); auto& iteration = iterations_[create_task.iteration_id()]; DCHECK_NE(iteration, nullptr); task = std::make_shared<Task>(create_task, iteration); tasks_by_iteration_[create_task.iteration_id()].push_back(task); tasks_by_worker_[create_task.worker_address()][task->task_id] = task; next_available_task_id_ = std::max(next_available_task_id_, task_id + 1); } void DispatcherState::FinishTask(const FinishTaskUpdate& finish_task) { VLOG(2) << "Marking task " << finish_task.task_id() << " as finished"; int64_t task_id = finish_task.task_id(); auto& task = tasks_[task_id]; DCHECK(task != nullptr); task->finished = true; tasks_by_worker_[task->worker_address].erase(task->task_id); bool all_finished = true; for (const auto& task_for_iteration : tasks_by_iteration_[task->iteration->iteration_id]) { if (!task_for_iteration->finished) { all_finished = false; } } VLOG(3) << "Iteration " << task->iteration->iteration_id << " finished: " << all_finished; iterations_[task->iteration->iteration_id]->finished = all_finished; } std::string 
DispatcherState::NextAvailableDatasetId() const { return absl::StrCat(next_available_dataset_id_); } void DispatcherState::UpdateNextAvailableDatasetId() { while (datasets_by_id_.contains(absl::StrCat(next_available_dataset_id_))) { ++next_available_dataset_id_; } } Status DispatcherState::DatasetFromId( const std::string& id, std::shared_ptr<const Dataset>& dataset) const { auto it = datasets_by_id_.find(id); if (it == datasets_by_id_.end()) { return errors::NotFound("Dataset id ", id, " not found"); } dataset = it->second; return absl::OkStatus(); } Status DispatcherState::WorkerFromAddress( const std::string& address, std::shared_ptr<const Worker>& worker) const { auto it = workers_.find(address); if (it == workers_.end()) { return errors::NotFound("Worker with address ", address, " not found."); } worker = it->second; return absl::OkStatus(); } std::vector<std::shared_ptr<const DispatcherState::Worker>> DispatcherState::ListWorkers() const { std::vector<std::shared_ptr<const Worker>> workers; workers.reserve(workers_.size()); for (const auto& it : workers_) { workers.push_back(it.second); } return workers; } std::vector<std::shared_ptr<const DispatcherState::Iteration>> DispatcherState::ListIterations() const { std::vector<std::shared_ptr<const DispatcherState::Iteration>> iterations; iterations.reserve(iterations_.size()); for (const auto& it : iterations_) { iterations.push_back(it.second); } return iterations; } Status DispatcherState::IterationFromId( int64_t id, std::shared_ptr<const Iteration>& iteration) const { auto it = iterations_.find(id); if (it == iterations_.end()) { return errors::NotFound("Iteration id ", id, " not found"); } iteration = it->second; return absl::OkStatus(); } Status DispatcherState::IterationByKey( IterationKey iteration_key, std::shared_ptr<const Iteration>& iteration) const { auto it = iterations_by_key_.find(iteration_key); if (it == iterations_by_key_.end()) { return errors::NotFound("Iteration key ", iteration_key.DebugString(), " not found"); } iteration = it->second; return absl::OkStatus(); } int64_t DispatcherState::NextAvailableJobId() const { return next_available_job_id_; } int64_t DispatcherState::NextAvailableIterationId() const { return next_available_iteration_id_; } Status DispatcherState::IterationForIterationClientId( int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration) { iteration = iterations_for_client_ids_[iteration_client_id]; if (!iteration) { return errors::NotFound("Iteration client id not found: ", iteration_client_id); } return absl::OkStatus(); } std::vector<int64_t> DispatcherState::ListActiveClientIds() { std::vector<int64_t> ids; for (const auto& it : iterations_for_client_ids_) { if (it.second && !it.second->finished) { ids.push_back(it.first); } } return ids; } int64_t DispatcherState::NextAvailableIterationClientId() const { return next_available_iteration_client_id_; } Status DispatcherState::TaskFromId(int64_t id, std::shared_ptr<const Task>& task) const { auto it = tasks_.find(id); if (it == tasks_.end()) { return errors::NotFound("Task ", id, " not found"); } task = it->second; return absl::OkStatus(); } Status DispatcherState::TasksForIteration( int64_t iteration_id, std::vector<std::shared_ptr<const Task>>& tasks) const { auto it = tasks_by_iteration_.find(iteration_id); if (it == tasks_by_iteration_.end()) { return errors::NotFound("Iteration ", iteration_id, " not found"); } tasks.clear(); tasks.reserve(it->second.size()); for (const auto& task : it->second) { tasks.push_back(task); } 
return absl::OkStatus(); } Status DispatcherState::TasksForWorker( absl::string_view worker_address, std::vector<std::shared_ptr<const Task>>& tasks) const { tasks.clear(); auto it = tasks_by_worker_.find(worker_address); if (it == tasks_by_worker_.end()) { return errors::NotFound("Worker ", worker_address, " not found"); } const absl::flat_hash_map<int64_t, std::shared_ptr<Task>>& worker_tasks = it->second; tasks.reserve(worker_tasks.size()); for (const auto& task : worker_tasks) { tasks.push_back(task.second); } return absl::OkStatus(); } int64_t DispatcherState::NextAvailableTaskId() const { return next_available_task_id_; } Status DispatcherState::ValidateWorker(absl::string_view worker_address) const { return worker_index_resolver_.ValidateWorker(worker_address); } absl::StatusOr<int64_t> DispatcherState::GetWorkerIndex( absl::string_view worker_address) const { return worker_index_resolver_.GetWorkerIndex(worker_address); } void DispatcherState::Snapshot(const SnapshotUpdate& snapshot) { snapshot_paths_.insert(snapshot.path()); } void DispatcherState::CompressionDisabledAtRuntime( const CompressionDisabledAtRuntimeUpdate& compression_disabled_at_runtime) { compression_disabled_at_runtime_.insert({ compression_disabled_at_runtime.dataset_id(), compression_disabled_at_runtime.compression_disabled(), }); } std::optional<bool> DispatcherState::CompressionDisabledAtRuntime( const std::string& dataset_id) const { if (auto it = compression_disabled_at_runtime_.find(dataset_id); it != compression_disabled_at_runtime_.end()) { return it->second; } return std::nullopt; } } }
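All state mutation funnels through Apply(), which is what lets the dispatcher rebuild itself after a restart by re-applying its journal in order. A minimal replay sketch, where the entries vector stands in for the real journal reader declared in journal.h:

// Minimal sketch of journal replay, not the dispatcher's actual recovery
// path: state is reconstructed purely by re-applying logged updates.
#include <vector>
#include "tensorflow/core/data/service/dispatcher_state.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/errors.h"

tensorflow::Status Replay(const std::vector<tensorflow::data::Update>& entries,
                          tensorflow::data::DispatcherState& state) {
  for (const tensorflow::data::Update& update : entries) {
    TF_RETURN_IF_ERROR(state.Apply(update));  // rejects UPDATE_TYPE_NOT_SET
  }
  return absl::OkStatus();
}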
#include "tensorflow/core/data/service/dispatcher_state.h" #include <cstdint> #include <memory> #include <string> #include <vector> #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/platform/random.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/service_config.pb.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace data { namespace { using Dataset = DispatcherState::Dataset; using Worker = DispatcherState::Worker; using IterationKey = DispatcherState::IterationKey; using Job = DispatcherState::Job; using Iteration = DispatcherState::Iteration; using Task = DispatcherState::Task; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; using ::tsl::testing::StatusIs; Status RegisterDataset(const std::string& dataset_id, DispatcherState& state) { Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); return state.Apply(update); } Status RegisterWorker(std::string worker_address, DispatcherState& state) { Update update; update.mutable_register_worker()->set_worker_address(worker_address); return state.Apply(update); } Status CreateJob(int64_t job_id, const std::string& dataset_id, const std::string& job_name, DispatcherState& state) { Update update; CreateJobUpdate* create_job = update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_job_name(job_name); return state.Apply(update); } Status CreateIteration(int64_t iteration_id, const std::string& dataset_id, const IterationKey& named_iteration_key, DispatcherState& state) { int64_t job_id = state.NextAvailableJobId(); TF_RETURN_IF_ERROR( CreateJob(job_id, dataset_id, named_iteration_key.name, state)); Update update; CreateIterationUpdate* create_iteration = update.mutable_create_iteration(); create_iteration->set_job_id(job_id); create_iteration->set_iteration_id(iteration_id); create_iteration->set_repetition(named_iteration_key.repetition); return state.Apply(update); } Status CreateIteration(int64_t iteration_id, const std::string& dataset_id, DispatcherState& state) { IterationKey key(absl::StrCat(random::New64()), 0); return CreateIteration(iteration_id, dataset_id, key, state); } Status AcquireIterationClientId(int64_t iteration_id, int64_t iteration_client_id, DispatcherState& state) { Update update; AcquireIterationClientUpdate* acquire_iteration_client = update.mutable_acquire_iteration_client(); acquire_iteration_client->set_iteration_id(iteration_id); acquire_iteration_client->set_iteration_client_id(iteration_client_id); return state.Apply(update); } Status ReleaseIterationClientId(int64_t iteration_client_id, int64_t release_time, DispatcherState& state) { Update update; ReleaseIterationClientUpdate* release_iteration_client = update.mutable_release_iteration_client(); release_iteration_client->set_iteration_client_id(iteration_client_id); release_iteration_client->set_time_micros(release_time); return state.Apply(update); } Status CreateTask(int64_t task_id, int64_t iteration_id, const std::string& worker_address, DispatcherState& state) { Update update; CreateTaskUpdate* create_task = update.mutable_create_task(); 
create_task->set_task_id(task_id); create_task->set_iteration_id(iteration_id); create_task->set_worker_address(worker_address); return state.Apply(update); } Status FinishTask(int64_t task_id, DispatcherState& state) { Update update; FinishTaskUpdate* finish_task = update.mutable_finish_task(); finish_task->set_task_id(task_id); return state.Apply(update); } Status Snapshot(const std::string& path, DispatcherState& state) { Update update; SnapshotUpdate* snapshot = update.mutable_snapshot(); snapshot->set_path(path); return state.Apply(update); } } TEST(DispatcherState, RegisterDataset) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t dataset_id_int; ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int)); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_TRUE(dataset->metadata.element_spec().empty()); EXPECT_EQ(dataset->metadata.compression(), DataServiceMetadata::COMPRESSION_UNSPECIFIED); } TEST(DispatcherState, RegisterDatasetWithExplicitID) { DispatcherState state; TF_EXPECT_OK(RegisterDataset("dataset_id", state)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId("dataset_id", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id"); } TEST(DispatcherState, RegisterDatasetsWithDifferentIDs) { DispatcherState state; TF_EXPECT_OK(RegisterDataset("dataset_id1", state)); TF_EXPECT_OK(RegisterDataset("dataset_id2", state)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId("dataset_id1", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id1"); TF_EXPECT_OK(state.DatasetFromId("dataset_id2", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id2"); } TEST(DispatcherState, RegisterDatasetCompression) { DispatcherState state; const std::string dataset_id = state.NextAvailableDatasetId(); Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); register_dataset->mutable_metadata()->set_compression( DataServiceMetadata::COMPRESSION_SNAPPY); TF_ASSERT_OK(state.Apply(update)); { std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_EQ(dataset->metadata.compression(), DataServiceMetadata::COMPRESSION_SNAPPY); } } TEST(DispatcherState, RegisterDatasetElementSpec) { DispatcherState state; const std::string dataset_id = state.NextAvailableDatasetId(); Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); register_dataset->mutable_metadata()->set_element_spec( "encoded_element_spec"); TF_ASSERT_OK(state.Apply(update)); { std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_EQ(dataset->metadata.element_spec(), "encoded_element_spec"); } } TEST(DispatcherState, MissingDatasetId) { DispatcherState state; std::shared_ptr<const Dataset> dataset; Status s = state.DatasetFromId("missing_dataset_id", dataset); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, NextAvailableDatasetId) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t dataset_id_int; ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int)); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); EXPECT_NE(state.NextAvailableDatasetId(), dataset_id); 
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1)); EXPECT_EQ(state.NextAvailableDatasetId(), state.NextAvailableDatasetId()); } TEST(DispatcherState, RegisterWorker) { DispatcherState state; std::string address = "test_worker_address"; TF_EXPECT_OK(RegisterWorker(address, state)); std::shared_ptr<const Worker> worker; TF_EXPECT_OK(state.WorkerFromAddress(address, worker)); EXPECT_EQ(worker->address, address); } TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) { experimental::DispatcherConfig config; config.add_worker_addresses("/worker/task/0"); config.add_worker_addresses("/worker/task/1"); config.add_worker_addresses("/worker/task/2"); DispatcherState state(config); TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000")); TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000")); TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000")); TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state)); TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state)); TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state)); std::shared_ptr<const Worker> worker; TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker)); EXPECT_EQ(worker->address, "/worker/task/0:20000"); } TEST(DispatcherState, RegisterInvalidWorkerInFixedWorkerSet) { experimental::DispatcherConfig config; config.add_worker_addresses("/worker/task/0"); config.add_worker_addresses("/worker/task/1"); config.add_worker_addresses("/worker/task/2"); DispatcherState state(config); EXPECT_THAT(state.ValidateWorker("localhost:20000"), StatusIs(error::FAILED_PRECONDITION, HasSubstr("The worker's address is not configured"))); TF_EXPECT_OK(RegisterWorker("localhost:20000", state)); std::shared_ptr<const Worker> worker; EXPECT_THAT(state.WorkerFromAddress("/worker/task/0:20000", worker), StatusIs(error::NOT_FOUND, "Worker with address /worker/task/0:20000 not found.")); } TEST(DispatcherState, ListWorkers) { DispatcherState state; std::string address_1 = "address_1"; std::string address_2 = "address_2"; { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, IsEmpty()); } TF_EXPECT_OK(RegisterWorker(address_1, state)); { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, SizeIs(1)); } TF_EXPECT_OK(RegisterWorker(address_2, state)); { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, SizeIs(2)); } } TEST(DispatcherState, MissingWorker) { DispatcherState state; std::shared_ptr<const Worker> worker; Status s = state.WorkerFromAddress("test_worker_address", worker); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, UnknownUpdate) { DispatcherState state; Update update; Status s = state.Apply(update); EXPECT_EQ(s.code(), error::INTERNAL); } TEST(DispatcherState, JobName) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t job_id = state.NextAvailableJobId(); std::string job_name = "test_name"; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state)); std::shared_ptr<const Job> job; TF_EXPECT_OK(state.JobByName(job_name, job)); EXPECT_EQ(state.NextAvailableJobId(), job_id + 1); EXPECT_EQ(job->dataset_id, dataset_id); EXPECT_FALSE(job->use_cross_trainer_cache); } TEST(DispatcherState, JobData) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t job_id = state.NextAvailableJobId(); int64_t num_consumers = 8; bool 
use_cross_trainer_cache = true; TF_ASSERT_OK(RegisterDataset(dataset_id, state)); Update update; CreateJobUpdate* create_job = update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_num_consumers(num_consumers); create_job->set_use_cross_trainer_cache(use_cross_trainer_cache); TF_ASSERT_OK(state.Apply(update)); std::shared_ptr<const Job> job; TF_ASSERT_OK(state.JobFromId(job_id, job)); EXPECT_EQ(job->num_consumers, num_consumers); EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache); } TEST(DispatcherState, CrossTrainerCacheTask) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); std::string worker_address = "test_worker_address"; TF_ASSERT_OK(RegisterDataset(dataset_id, state)); int64_t job_id = state.NextAvailableJobId(); Update job_update; CreateJobUpdate* create_job = job_update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_use_cross_trainer_cache(true); TF_ASSERT_OK(state.Apply(job_update)); int64_t iteration_id = state.NextAvailableIterationId(); Update iteration_update; CreateIterationUpdate* create_iteration = iteration_update.mutable_create_iteration(); create_iteration->set_job_id(job_id); create_iteration->set_iteration_id(iteration_id); TF_ASSERT_OK(state.Apply(iteration_update)); int64_t task_id = state.NextAvailableTaskId(); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_EQ(task->iteration->iteration_id, iteration_id); EXPECT_EQ(task->task_id, task_id); EXPECT_EQ(task->worker_address, worker_address); EXPECT_TRUE(task->iteration->job->use_cross_trainer_cache); } TEST(DispatcherState, CreateTask) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; std::string worker_address = "test_worker_address"; DispatcherState state; int64_t task_id = state.NextAvailableTaskId(); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); EXPECT_EQ(state.NextAvailableTaskId(), task_id + 1); { std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_EQ(task->iteration->iteration_id, iteration_id); EXPECT_EQ(task->task_id, task_id); EXPECT_EQ(task->worker_address, worker_address); EXPECT_FALSE(task->iteration->job->use_cross_trainer_cache); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(1, tasks.size()); } } TEST(DispatcherState, CreateTasksForSameIteration) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks)); EXPECT_THAT(tasks, SizeIs(2)); } } TEST(DispatcherState, CreateTasksForDifferentIterations) { std::string dataset_id = 
"dataset_id"; int64_t iteration_id_1 = 3; int64_t iteration_id_2 = 4; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id_1, dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id_2, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id_1, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id_2, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id_1, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id_2, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } } TEST(DispatcherState, CreateTasksForSameWorker) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(2, tasks.size()); } } TEST(DispatcherState, CreateTasksForDifferentWorkers) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address_1 = "test_worker_address_1"; std::string worker_address_2 = "test_worker_address_2"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address_1, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address_2, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address_1, tasks)); EXPECT_EQ(1, tasks.size()); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address_2, tasks)); EXPECT_EQ(1, tasks.size()); } } TEST(DispatcherState, GetTasksForWorkerEmpty) { std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterWorker(worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(0, tasks.size()); } } TEST(DispatcherState, FinishTask) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id = 4; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); TF_EXPECT_OK(FinishTask(task_id, state)); std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_TRUE(task->finished); std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_TRUE(iteration->finished); } TEST(DispatcherState, FinishMultiTaskIteration) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 4; int64_t task_id_2 = 5; std::string worker_address = "test_worker_address"; DispatcherState state; 
TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); TF_EXPECT_OK(FinishTask(task_id_1, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_FALSE(iteration->finished); } TF_EXPECT_OK(FinishTask(task_id_2, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_TRUE(iteration->finished); } } TEST(DispatcherState, AcquireIterationClientId) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id_1 = 1; int64_t iteration_client_id_2 = 2; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_1, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_EQ(iteration->num_clients, 1); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_2, state)); EXPECT_EQ(iteration->num_clients, 2); } { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK( state.IterationForIterationClientId(iteration_client_id_1, iteration)); EXPECT_EQ(iteration->iteration_id, iteration_id); } { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK( state.IterationForIterationClientId(iteration_client_id_2, iteration)); EXPECT_EQ(iteration->iteration_id, iteration_id); } } TEST(DispatcherState, ReleaseIterationClientId) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id = 6; int64_t release_time = 100; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id, release_time, state)); std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_EQ(iteration->num_clients, 0); Status s = state.IterationForIterationClientId(iteration_client_id, iteration); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, ListActiveClientsEmpty) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id = 6; int64_t release_time = 100; DispatcherState state; EXPECT_THAT(state.ListActiveClientIds(), IsEmpty()); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id, release_time, state)); EXPECT_THAT(state.ListActiveClientIds(), IsEmpty()); } TEST(DispatcherState, ListActiveClients) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id_1 = 6; int64_t iteration_client_id_2 = 7; int64_t iteration_client_id_3 = 8; int64_t release_time = 100; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_1, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, 
iteration_client_id_2, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id_2, release_time, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_3, state)); EXPECT_THAT(state.ListActiveClientIds(), UnorderedElementsAre(6, 8)); } TEST(DispatcherState, ListSnapshotPaths) { DispatcherState state; absl::flat_hash_set<std::string> snapshot_paths = {"p1", "p2"}; for (const auto& snapshot_path : snapshot_paths) { TF_EXPECT_OK(Snapshot(snapshot_path, state)); } EXPECT_EQ(state.ListSnapshotPaths(), snapshot_paths); } TEST(DispatcherState, GetNumberOfRegisteredWorkers) { DispatcherState state; std::string address_1 = "address_1"; std::string address_2 = "address_2"; EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 0); TF_EXPECT_OK(RegisterWorker(address_1, state)); EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 1); TF_EXPECT_OK(RegisterWorker(address_2, state)); EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 2); } } }
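Most update types get a small wrapper like the helpers above; one the implementation handles but no helper covers here is iteration garbage collection. A sketch in the same style, assuming the generated proto setter matches the field read in DispatcherState::GarbageCollectIteration:

// Hypothetical helper, same shape as RegisterDataset()/FinishTask() above.
Status GarbageCollectIteration(int64_t iteration_id, DispatcherState& state) {
  Update update;
  update.mutable_garbage_collect_iteration()->set_iteration_id(iteration_id);
  return state.Apply(update);
}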
void DispatcherState::CreateJob(const CreateJobUpdate& create_job) {
  int64_t job_id = create_job.job_id();
  std::string job_name = create_job.job_name();
  std::optional<int64_t> num_consumers;
  if (create_job.optional_num_consumers_case() ==
      CreateJobUpdate::kNumConsumers) {
    num_consumers = create_job.num_consumers();
  }
  auto job = std::make_shared<Job>(
      job_id, create_job.dataset_id(), create_job.processing_mode_def(),
      job_name, num_consumers, create_job.use_cross_trainer_cache(),
      create_job.target_workers());
  DCHECK(!jobs_by_id_.contains(job_id));
  jobs_by_id_[job_id] = job;
  DCHECK(!jobs_by_name_.contains(job_name));
  jobs_by_name_[job_name] = job;
  next_available_job_id_ = std::max(next_available_job_id_, job_id + 1);
}
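The oneof check above is how CreateJob distinguishes "no consumer count" from an explicit zero, since a plain proto3 int64 field carries no presence bit. A short sketch of the producing side, using the setters exercised in the tests:

Update update;
CreateJobUpdate* create_job = update.mutable_create_job();
create_job->set_job_id(5);
create_job->set_dataset_id("dataset_id");
// Setting the field flips optional_num_consumers_case() to kNumConsumers;
// leaving it untouched makes CreateJob() store std::nullopt instead of 0.
create_job->set_num_consumers(2);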
TEST(DispatcherState, JobName) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t job_id = state.NextAvailableJobId();
  std::string job_name = "test_name";
  TF_EXPECT_OK(RegisterDataset(dataset_id, state));
  TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state));
  std::shared_ptr<const Job> job;
  TF_EXPECT_OK(state.JobByName(job_name, job));
  EXPECT_EQ(state.NextAvailableJobId(), job_id + 1);
  EXPECT_EQ(job->dataset_id, dataset_id);
  EXPECT_FALSE(job->use_cross_trainer_cache);
}

TEST(DispatcherState, JobData) {
  DispatcherState state;
  std::string dataset_id = state.NextAvailableDatasetId();
  int64_t job_id = state.NextAvailableJobId();
  int64_t num_consumers = 8;
  bool use_cross_trainer_cache = true;
  TF_ASSERT_OK(RegisterDataset(dataset_id, state));
  Update update;
  CreateJobUpdate* create_job = update.mutable_create_job();
  create_job->set_job_id(job_id);
  create_job->set_dataset_id(dataset_id);
  create_job->set_num_consumers(num_consumers);
  create_job->set_use_cross_trainer_cache(use_cross_trainer_cache);
  TF_ASSERT_OK(state.Apply(update));
  std::shared_ptr<const Job> job;
  TF_ASSERT_OK(state.JobFromId(job_id, job));
  EXPECT_EQ(job->num_consumers, num_consumers);
  EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache);
}
#include "absl/flags/internal/program_name.h" #include <string> #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/flags/internal/path_util.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { ABSL_CONST_INIT static absl::Mutex program_name_guard(absl::kConstInit); ABSL_CONST_INIT static std::string* program_name ABSL_GUARDED_BY(program_name_guard) = nullptr; std::string ProgramInvocationName() { absl::MutexLock l(&program_name_guard); return program_name ? *program_name : "UNKNOWN"; } std::string ShortProgramInvocationName() { absl::MutexLock l(&program_name_guard); return program_name ? std::string(flags_internal::Basename(*program_name)) : "UNKNOWN"; } void SetProgramInvocationName(absl::string_view prog_name_str) { absl::MutexLock l(&program_name_guard); if (!program_name) program_name = new std::string(prog_name_str); else program_name->assign(prog_name_str.data(), prog_name_str.size()); } } ABSL_NAMESPACE_END }
#include "absl/flags/internal/program_name.h" #include <string> #include "gtest/gtest.h" #include "absl/strings/match.h" #include "absl/strings/string_view.h" namespace { namespace flags = absl::flags_internal; TEST(FlagsPathUtilTest, TestProgamNameInterfaces) { flags::SetProgramInvocationName("absl/flags/program_name_test"); std::string program_name = flags::ProgramInvocationName(); for (char& c : program_name) if (c == '\\') c = '/'; #if !defined(__wasm__) && !defined(__asmjs__) const std::string expect_name = "absl/flags/program_name_test"; const std::string expect_basename = "program_name_test"; #else const std::string expect_name = "this.program"; const std::string expect_basename = "this.program"; #endif EXPECT_TRUE(absl::EndsWith(program_name, expect_name)) << program_name; EXPECT_EQ(flags::ShortProgramInvocationName(), expect_basename); flags::SetProgramInvocationName("a/my_test"); EXPECT_EQ(flags::ProgramInvocationName(), "a/my_test"); EXPECT_EQ(flags::ShortProgramInvocationName(), "my_test"); absl::string_view not_null_terminated("absl/aaa/bbb"); not_null_terminated = not_null_terminated.substr(1, 10); flags::SetProgramInvocationName(not_null_terminated); EXPECT_EQ(flags::ProgramInvocationName(), "bsl/aaa/bb"); EXPECT_EQ(flags::ShortProgramInvocationName(), "bb"); } }
void SetProgramInvocationName(absl::string_view prog_name_str) {
  absl::MutexLock l(&program_name_guard);
  if (!program_name)
    program_name = new std::string(prog_name_str);
  else
    program_name->assign(prog_name_str.data(), prog_name_str.size());
}
TEST(FlagsPathUtilTest, TestProgamNameInterfaces) {
  flags::SetProgramInvocationName("absl/flags/program_name_test");
  std::string program_name = flags::ProgramInvocationName();
  for (char& c : program_name)
    if (c == '\\') c = '/';

#if !defined(__wasm__) && !defined(__asmjs__)
  const std::string expect_name = "absl/flags/program_name_test";
  const std::string expect_basename = "program_name_test";
#else
  const std::string expect_name = "this.program";
  const std::string expect_basename = "this.program";
#endif

  EXPECT_TRUE(absl::EndsWith(program_name, expect_name)) << program_name;
  EXPECT_EQ(flags::ShortProgramInvocationName(), expect_basename);

  flags::SetProgramInvocationName("a/my_test");
  EXPECT_EQ(flags::ProgramInvocationName(), "a/my_test");
  EXPECT_EQ(flags::ShortProgramInvocationName(), "my_test");

  absl::string_view not_null_terminated("absl/aaa/bbb");
  not_null_terminated = not_null_terminated.substr(1, 10);
  flags::SetProgramInvocationName(not_null_terminated);
  EXPECT_EQ(flags::ProgramInvocationName(), "bsl/aaa/bb");
  EXPECT_EQ(flags::ShortProgramInvocationName(), "bb");
}
#ifndef TENSORSTORE_UTIL_RATIONAL_H_ #define TENSORSTORE_UTIL_RATIONAL_H_ #include <climits> #include <cmath> #include <limits> #include <ostream> #include <type_traits> #include "tensorstore/internal/integer_overflow.h" #include "tensorstore/serialization/fwd.h" #include "tensorstore/util/division.h" namespace tensorstore { template <typename I> class Rational { static_assert(std::numeric_limits<I>::is_specialized && std::numeric_limits<I>::is_integer && std::numeric_limits<I>::is_signed); public: using int_type = I; constexpr Rational(I value = 0) : n_(value), d_(1) {} constexpr Rational(I n, I d) { if (d != 0) { I gcd = tensorstore::GreatestCommonDivisor(n, d); n /= gcd; d /= gcd; if (n == 0) { d = 1; } else if (d < 0) { if (n == std::numeric_limits<I>::min() || d == std::numeric_limits<I>::min()) { d = 0; } else { d = -d; n = -n; } } } n_ = n; d_ = d; } constexpr Rational& operator=(I value) { *this = Rational<I>(value); return *this; } constexpr I numerator() const { return n_; } constexpr I denominator() const { return d_; } constexpr bool is_nan() const { return d_ == 0; } static constexpr Rational nan() { Rational r; r.d_ = 0; return r; } constexpr explicit operator bool() const { return d_ == 0 || n_ != 0; } friend constexpr bool operator==(Rational t, Rational r) { return t.d_ != 0 && t.n_ == r.n_ && t.d_ == r.d_; } friend constexpr bool operator==(Rational t, I i) { return t.d_ == 1 && t.n_ == i; } friend constexpr bool operator==(I i, Rational t) { return (t == i); } friend constexpr bool operator!=(Rational t, Rational r) { return !(t == r); } friend constexpr bool operator!=(Rational t, I i) { return !(t == i); } friend constexpr bool operator!=(I i, Rational t) { return !(t == i); } friend std::ostream& operator<<(std::ostream& os, Rational x) { if (x.is_nan()) return os << "nan"; if (x.d_ == 1) return os << x.n_; return os << x.n_ << '/' << x.d_; } constexpr Rational operator+() const { return *this; } constexpr Rational operator-() const { Rational r; if (n_ == std::numeric_limits<I>::min()) return nan(); r.n_ = -n_; r.d_ = d_; return r; } friend constexpr Rational operator+(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I g = GreatestCommonDivisor(t.d_, r.d_); t.d_ /= g; if (I temp; internal::MulOverflow(t.n_, r.d_ / g, &t.n_) || internal::MulOverflow(r.n_, t.d_, &temp) || internal::AddOverflow(t.n_, temp, &t.n_)) { return nan(); } g = GreatestCommonDivisor(t.n_, g); t.n_ /= g; if (internal::MulOverflow(t.d_, r.d_ / g, &t.d_)) return nan(); return t; } friend constexpr Rational operator+(Rational t, I i) { if (internal::MulOverflow(i, t.d_, &i) || internal::AddOverflow(t.n_, i, &t.n_)) { return nan(); } return t; } friend constexpr Rational operator+(I i, Rational t) { return t + i; } constexpr Rational& operator+=(Rational r) { return *this = *this + r; } constexpr Rational& operator+=(I i) { return *this = *this + i; } constexpr Rational& operator++() { return *this += 1; } constexpr Rational operator++(int) { Rational r = *this; *this += 1; return r; } friend constexpr Rational operator-(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I g = GreatestCommonDivisor(t.d_, r.d_); t.d_ /= g; if (I temp; internal::MulOverflow(t.n_, r.d_ / g, &t.n_) || internal::MulOverflow(r.n_, t.d_, &temp) || internal::SubOverflow(t.n_, temp, &t.n_)) { return nan(); } g = GreatestCommonDivisor(t.n_, g); t.n_ /= g; if (internal::MulOverflow(t.d_, r.d_ / g, &t.d_)) return nan(); return t; } friend constexpr Rational operator-(Rational t, I i) { if 
(internal::MulOverflow(i, t.d_, &i) || internal::SubOverflow(t.n_, i, &t.n_)) { return nan(); } return t; } friend constexpr Rational operator-(I i, Rational r) { if (internal::MulOverflow(i, r.d_, &i) || internal::SubOverflow(i, r.n_, &r.n_)) { return nan(); } return r; } constexpr Rational& operator-=(Rational r) { return *this = *this - r; } constexpr Rational& operator-=(I i) { return *this = *this - i; } constexpr Rational& operator--() { return *this -= 1; } constexpr Rational operator--(int) { Rational r = *this; *this -= 1; return r; } friend constexpr Rational operator*(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I gcd1 = GreatestCommonDivisor(t.n_, r.d_); I gcd2 = GreatestCommonDivisor(r.n_, t.d_); if (internal::MulOverflow(t.n_ / gcd1, r.n_ / gcd2, &t.n_) || internal::MulOverflow(t.d_ / gcd2, r.d_ / gcd1, &t.d_)) { return nan(); } return t; } friend constexpr Rational operator*(Rational t, I i) { if (t.is_nan()) return nan(); I gcd = GreatestCommonDivisor(i, t.d_); if (internal::MulOverflow(t.n_, i / gcd, &t.n_)) return nan(); t.d_ /= gcd; return t; } friend constexpr Rational operator*(I i, Rational t) { return t * i; } constexpr Rational& operator*=(Rational r) { return *this = *this * r; } constexpr Rational& operator*=(I i) { return *this = *this * i; } friend constexpr Rational operator/(Rational t, Rational r) { if (t.is_nan() || r.is_nan() || r.n_ == 0) return nan(); I gcd1 = GreatestCommonDivisor(t.n_, r.n_); I gcd2 = GreatestCommonDivisor(r.d_, t.d_); if (internal::MulOverflow(t.n_ / gcd1, r.d_ / gcd2, &t.n_) || internal::MulOverflow(t.d_ / gcd2, r.n_ / gcd1, &t.d_)) { return nan(); } if (t.d_ < 0) { if (t.d_ == std::numeric_limits<I>::min() || t.n_ == std::numeric_limits<I>::min()) { return nan(); } t.n_ = -t.n_; t.d_ = -t.d_; } return t; } friend constexpr Rational operator/(Rational t, I i) { if (t.is_nan() || i == 0) return nan(); I gcd = GreatestCommonDivisor(i, t.n_); t.n_ /= gcd; if (internal::MulOverflow(t.d_, i / gcd, &t.d_)) return nan(); return t; } friend constexpr Rational operator/(I i, Rational r) { if (r.is_nan() || r.n_ == 0) return nan(); I gcd1 = GreatestCommonDivisor(i, r.n_); Rational t; if (internal::MulOverflow(i / gcd1, r.d_, &t.n_)) return nan(); t.d_ = r.n_ / gcd1; if (t.d_ < 0) { if (t.d_ == std::numeric_limits<I>::min() || t.n_ == std::numeric_limits<I>::min()) { return nan(); } t.n_ = -t.n_; t.d_ = -t.d_; } return t; } constexpr Rational& operator/=(Rational r) { return *this = *this / r; } constexpr Rational& operator/=(I i) { return *this = *this / i; } friend constexpr bool operator<(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; if (t.d_ == r.d_) return t.n_ < r.n_; if (t.d_ == 1) return t.n_ < r; if (r.d_ == 1) return t < r.n_; ContinuedFraction ts{t}, rs{r}; bool reverse = false; while (true) { if (ts.q != rs.q) { return reverse ? 
ts.q > rs.q : ts.q < rs.q; } reverse = !reverse; if (ts.r == 0 || rs.r == 0) { break; } ts.next(); rs.next(); } if (ts.r == rs.r) { return false; } else { return (ts.r != 0) != reverse; } } friend constexpr bool operator<(Rational t, I i) { if (t.is_nan()) return false; I q = t.n_ / t.d_, r = t.n_ % t.d_; if (r < 0) { r += t.d_; --q; } return q < i; } friend constexpr bool operator<(I i, Rational t) { if (t.is_nan()) return false; I q = t.n_ / t.d_, r = t.n_ % t.d_; if (r > 0) { r -= t.d_; ++q; } return q > i; } friend constexpr bool operator>(Rational t, Rational r) { return r < t; } friend constexpr bool operator>(I i, Rational t) { return t < i; } friend constexpr bool operator>(Rational t, I i) { return i < t; } friend constexpr bool operator<=(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; return !(r < t); } friend constexpr bool operator<=(Rational t, I r) { if (t.is_nan()) return false; return !(r < t); } friend constexpr bool operator<=(I r, Rational t) { if (t.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(I t, Rational r) { if (r.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(Rational t, I r) { if (t.is_nan()) return false; return !(t < r); } [[nodiscard]] static constexpr bool UnifyDenominators(Rational a, Rational b, I& numerator_a, I& numerator_b, I& denominator) { if (a.is_nan() || b.is_nan()) return false; I g = GreatestCommonDivisor(a.denominator(), b.denominator()); I a_factor = b.denominator() / g; I b_factor = a.denominator() / g; return !(internal::MulOverflow(b_factor, b.denominator(), &denominator) || internal::MulOverflow(a.numerator(), a_factor, &numerator_a) || internal::MulOverflow(b.numerator(), b_factor, &numerator_b)); } static Rational FromDouble(double value) { if (!std::isfinite(value)) return nan(); constexpr int max_exponent = sizeof(I) * CHAR_BIT - 2; int exponent; double mantissa = std::frexp(value, &exponent); if (exponent > max_exponent + 1) return nan(); if (exponent <= -max_exponent) return I(0); int n = std::min(max_exponent + 1, max_exponent + exponent); I numerator = static_cast<I>(std::round(std::ldexp(mantissa, n))); I denominator = I(1) << (n - exponent); return {numerator, denominator}; } constexpr Rational Approximate(I max_denominator) const { assert(max_denominator >= I(1)); if (d_ <= max_denominator) return *this; using U = std::make_unsigned_t<I>; U p0 = 0, q0 = 1, p1 = 1, q1 = 0; bool negative = false; U n = 0, d = d_; if (n_ < I(0)) { negative = true; n = ~static_cast<U>(n_) + U(1); } else { n = static_cast<U>(n_); } while (true) { U a = n / d; U r = n % d; U q2 = q0 + a * q1; if (q2 >= max_denominator) { U x = (max_denominator - q0) / q1; auto result = (x * 2 >= a) ? 
FromReduced(static_cast<I>(p0 + x * p1), static_cast<I>(q0 + x * q1)) : FromReduced(static_cast<I>(p1), static_cast<I>(q1)); if (negative) { result.n_ *= -1; } return result; } U p2 = p0 + a * p1; p0 = p1; q0 = q1; p1 = p2; q1 = q2; n = d; d = r; } } constexpr static auto ApplyMembers = [](auto&& x, auto f) { return f(x.n_, x.d_); }; private: static constexpr Rational FromReduced(I n, I d) { Rational r; r.n_ = n; r.d_ = d; return r; } struct ContinuedFraction { constexpr explicit ContinuedFraction(Rational x) : n(x.n_), d(x.d_), q(x.n_ / x.d_), r(x.n_ % x.d_) { if (r < 0) { r += d; --q; } } constexpr void next() { n = d; d = r; q = n / d; r = n % d; } I n, d, q, r; }; I n_ = 0; I d_ = 0; }; } #endif
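Two behaviors of this class are easy to miss in the operator soup: unrepresentable results quietly become nan() and poison later arithmetic, and FromDouble plus Approximate together recover small-denominator convergents via the continued-fraction walk above. A short sketch, with the pi expectation taken from the tests below:

#include <cassert>
#include <cstdint>
#include "tensorstore/util/rational.h"

void RationalSketch() {
  using R32 = tensorstore::Rational<int32_t>;
  R32 big(2147483647);
  // Overflow does not wrap or throw; it collapses to nan() and propagates.
  assert((big + 1).is_nan());
  assert(((big + 1) * 2).is_nan());

  using R64 = tensorstore::Rational<int64_t>;
  // Every finite double is a dyadic rational, so FromDouble is exact;
  // Approximate then walks the continued fraction: pi -> 355/113.
  assert(R64::FromDouble(3.141592653589793).Approximate(1000) ==
         R64(355, 113));
}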
#include "tensorstore/util/rational.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/util/str_cat.h" namespace { constexpr double pi = 3.14159265358979323846264338327950288; using ::tensorstore::Rational; TEST(RationalTest, Initialization) { static constexpr Rational<int> r1, r2(0), r3(1), r4(-3), r5(7, 2), r6(5, 15), r7(14, -21), r8(-4, 6), r9(-14, -70); static_assert(r1.numerator() == 0); static_assert(r2.numerator() == 0); static_assert(r3.numerator() == 1); static_assert(r4.numerator() == -3); static_assert(r5.numerator() == 7); static_assert(r6.numerator() == 1); static_assert(r7.numerator() == -2); static_assert(r8.numerator() == -2); static_assert(r9.numerator() == 1); static_assert(r1.denominator() == 1); static_assert(r2.denominator() == 1); static_assert(r3.denominator() == 1); static_assert(r4.denominator() == 1); static_assert(r5.denominator() == 2); static_assert(r6.denominator() == 3); static_assert(r7.denominator() == 3); static_assert(r8.denominator() == 3); static_assert(r9.denominator() == 5); static_assert(Rational<int>(0, 0).is_nan()); static_assert(Rational<int>(1, std::numeric_limits<int>::min()).is_nan()); static_assert(!Rational<int>(1, -std::numeric_limits<int>::max()).is_nan()); } TEST(RationalTest, Compare) { static constexpr Rational<int> r1, r2(0), r3(1), r4(-3), r5(7, 2), r6(5, 15), r7(14, -21), r8(-4, 6), r9(-14, -70), nan = Rational<int>::nan(); static_assert(r1 == r2); static_assert(r2 != r3); static_assert(r4 < r3); static_assert(r4 <= r5); static_assert(r1 <= r2); static_assert(r5 > r6); static_assert(r5 >= r6); static_assert(r7 >= r8); static_assert(!(r3 == r2)); static_assert(!(r1 != r2)); static_assert(!(r1 < r2)); static_assert(!(r5 < r6)); static_assert(!(r9 <= r2)); static_assert(!(r8 > r7)); static_assert(!(r8 > r2)); static_assert(!(r4 >= r6)); static_assert(r1 == 0); static_assert(r2 != -1); static_assert(r3 < 2); static_assert(r4 <= -3); static_assert(r5 > 3); static_assert(r6 >= 0); static_assert(0 == r2); static_assert(0 != r7); static_assert(-1 < r8); static_assert(-2 <= r9); static_assert(1 > r1); static_assert(1 >= r3); static constexpr Rational<int> x1(9, 4); static constexpr Rational<int> x2(61, 27); static constexpr Rational<int> x3(52, 23); static constexpr Rational<int> x4(70, 31); static_assert(x1 < x2); static_assert(!(x1 < x1)); static_assert(!(x2 < x2)); static_assert(!(x2 < x1)); static_assert(x2 < x3); static_assert(x4 < x2); static_assert(!(x3 < x4)); static_assert(r7 < x1); static_assert(!(x2 < r7)); static_assert(!(nan < nan)); static_assert(!(nan <= nan)); static_assert(!(nan == nan)); static_assert(nan != nan); static_assert(!(nan > nan)); static_assert(!(nan >= nan)); static_assert(!(nan < r1)); static_assert(!(nan == r1)); static_assert(nan != r1); static_assert(!(nan <= r1)); static_assert(!(nan > r1)); static_assert(!(nan >= r1)); static_assert(!(r1 < nan)); static_assert(!(r1 <= nan)); static_assert(!(r1 == nan)); static_assert(r1 != nan); static_assert(!(r1 > nan)); static_assert(!(r1 >= nan)); static_assert(!(nan < 0)); static_assert(!(nan == 0)); static_assert(nan != 0); static_assert(!(nan <= 0)); static_assert(!(nan > 0)); static_assert(!(nan >= 0)); static_assert(!(0 < nan)); static_assert(!(0 <= nan)); static_assert(!(0 == nan)); static_assert(0 != nan); static_assert(!(0 > nan)); static_assert(!(0 >= nan)); } TEST(RationalTest, Increment) { Rational<int> r1, r2(0), r3(1), r7(14, -21), r8(-4, 6); EXPECT_EQ(r1++, r2); EXPECT_NE(r1, r2); EXPECT_EQ(r1, r3); EXPECT_EQ(--r1, r2); 
EXPECT_EQ(r8--, r7); EXPECT_NE(r8, r7); EXPECT_EQ(++r8, r7); Rational<int> x1 = std::numeric_limits<int>::max(); EXPECT_FALSE(x1.is_nan()); ++x1; EXPECT_TRUE(x1.is_nan()); Rational<int> x2 = std::numeric_limits<int>::min(); EXPECT_FALSE(x2.is_nan()); --x2; EXPECT_TRUE(x2.is_nan()); } TEST(RationalTest, UnaryOperators) { static constexpr Rational<int> r2(0), r3(1), r4(-3), r5(7, 2); static_assert(+r5 == r5); static_assert(-r3 != r3); static_assert(-(-r3) == r3); static_assert(-r4 == 3); static_assert(!r2); static_assert(!!r3); static_assert(r3); } TEST(RationalTest, Addition) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(1, 2) + rational_type(1, 2), static_cast<T>(1)); EXPECT_EQ(rational_type(11, 3) + rational_type(1, 2), rational_type(25, 6)); EXPECT_EQ(rational_type(-8, 3) + rational_type(1, 5), rational_type(-37, 15)); EXPECT_EQ(rational_type(-7, 6) + rational_type(1, 7), rational_type(1, 7) - rational_type(7, 6)); EXPECT_EQ(rational_type(13, 5) - rational_type(1, 2), rational_type(21, 10)); EXPECT_EQ(rational_type(22, 3) + static_cast<T>(1), rational_type(25, 3)); EXPECT_EQ(rational_type(12, 7) - static_cast<T>(2), rational_type(-2, 7)); EXPECT_EQ(static_cast<T>(3) + rational_type(4, 5), rational_type(19, 5)); EXPECT_EQ(static_cast<T>(4) - rational_type(9, 2), rational_type(-1, 2)); rational_type r(11); r -= rational_type(20, 3); EXPECT_EQ(r, rational_type(13, 3)); r += rational_type(1, 2); EXPECT_EQ(r, rational_type(29, 6)); r -= static_cast<T>(5); EXPECT_EQ(r, rational_type(1, -6)); r += rational_type(1, 5); EXPECT_EQ(r, rational_type(1, 30)); r += static_cast<T>(2); EXPECT_EQ(r, rational_type(61, 30)); } TEST(RationalTest, Multiplication) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(1, 3) * rational_type(-3, 4), rational_type(-1, 4)); EXPECT_EQ(rational_type(2, 5) * static_cast<T>(7), rational_type(14, 5)); EXPECT_EQ(static_cast<T>(-2) * rational_type(1, 6), rational_type(-1, 3)); rational_type r = rational_type(3, 7); r *= static_cast<T>(14); EXPECT_EQ(r, static_cast<T>(6)); r *= rational_type(3, 8); EXPECT_EQ(r, rational_type(9, 4)); } TEST(RationalTest, Division) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(-1, 20) / rational_type(4, 5), rational_type(-1, 16)); EXPECT_EQ(rational_type(5, 6) / static_cast<T>(7), rational_type(5, 42)); EXPECT_EQ(static_cast<T>(8) / rational_type(2, 7), static_cast<T>(28)); EXPECT_TRUE((rational_type(23, 17) / rational_type()).is_nan()); EXPECT_TRUE((rational_type(4, 15) / static_cast<T>(0)).is_nan()); rational_type r = rational_type(4, 3); r /= rational_type(5, 4); EXPECT_EQ(r, rational_type(16, 15)); r /= static_cast<T>(4); EXPECT_EQ(r, rational_type(4, 15)); EXPECT_TRUE((r /= rational_type()).is_nan()); EXPECT_TRUE((r /= static_cast<T>(0)).is_nan()); EXPECT_EQ(rational_type(-1) / rational_type(-3), rational_type(1, 3)); } TEST(RationalTest, AssignArithmetic) { using T = int; using rational_type = Rational<T>; rational_type r = rational_type(4, 3); r += r; EXPECT_EQ(r, rational_type(8, 3)); r *= r; EXPECT_EQ(r, rational_type(64, 9)); rational_type s = r; r /= s; EXPECT_EQ(r, rational_type(1, 1)); s = r; r -= s; EXPECT_EQ(r, rational_type(0, 1)); s = r; EXPECT_TRUE((r /= s).is_nan()); } TEST(RationalTest, Ostream) { EXPECT_EQ("nan", tensorstore::StrCat(Rational<int>::nan())); EXPECT_EQ("5", tensorstore::StrCat(Rational<int>(5))); EXPECT_EQ("22/7", tensorstore::StrCat(Rational<int>(44, 14))); } TEST(RationalTest, Overflow) { using R = Rational<int32_t>; { R r 
= R(2147483647) + R(1); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647) - R(-1); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647) * R(2); EXPECT_TRUE(r.is_nan()); } EXPECT_EQ(R(2147483647, 2), R(2147483647) / R(2)); { R r = R(2147483647, 2) * R(3); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647, 2) / R(1, 3); EXPECT_TRUE(r.is_nan()); } } TEST(UnifyDenominatorsTest, Overflow) { using R = Rational<int32_t>; int32_t num0, num1, den; EXPECT_FALSE( R::UnifyDenominators({1, 2147483647}, {1, 2147483646}, num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(R::nan(), 1, num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(1, R::nan(), num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(R::nan(), R::nan(), num0, num1, den)); } TEST(UnifyDenominatorsTest, NoOverflow) { using R = Rational<int32_t>; R r0(1, 3); R r1(1, 2); int32_t num0, num1, den; EXPECT_TRUE(R::UnifyDenominators(r0, r1, num0, num1, den)); EXPECT_EQ(2, num0); EXPECT_EQ(3, num1); EXPECT_EQ(6, den); } TEST(FromDoubleTest, Simple) { using R = Rational<int64_t>; EXPECT_EQ(R(0), R::FromDouble(0)); EXPECT_EQ(R(1, 2), R::FromDouble(0.5)); EXPECT_EQ(R(1, 4), R::FromDouble(0.25)); EXPECT_EQ(R(1, 8), R::FromDouble(0.125)); EXPECT_EQ(R(-1), R::FromDouble(-1)); EXPECT_EQ(R(1), R::FromDouble(1)); EXPECT_EQ(R(5404319552844595, 18014398509481984), R::FromDouble(0.3)); EXPECT_EQ(R(-5404319552844595, 18014398509481984), R::FromDouble(-0.3)); for (int i = 1; i <= 62; ++i) { SCOPED_TRACE(tensorstore::StrCat("i=", i)); EXPECT_EQ(R(1, static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(1.0, -i))); EXPECT_EQ(R(-1, static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(-1.0, -i))); EXPECT_EQ(R(static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(1.0, i))); EXPECT_EQ(R(static_cast<int64_t>(-1) << i), R::FromDouble(std::ldexp(-1.0, i))); } EXPECT_EQ(R(1, static_cast<int64_t>(1) << 53), R::FromDouble(0x1.0000000000000p-53)); EXPECT_EQ(R(0), R::FromDouble(0x1.0000000000000p-63)); EXPECT_EQ(R(884279719003555, 281474976710656), R::FromDouble(pi)); } TEST(ApproximateTest, Simple) { using R = Rational<int64_t>; EXPECT_EQ(R(1), R(1).Approximate(100)); EXPECT_EQ(R(-1), R(-1).Approximate(100)); EXPECT_EQ(R(-100), R(-100).Approximate(100)); EXPECT_EQ(R(1, 3), R::FromDouble(0.33333333333333333333333).Approximate(1000000)); EXPECT_EQ(R(-1, 3), R::FromDouble(-0.33333333333333333333333).Approximate(1000000)); EXPECT_EQ(R(3, 10), R::FromDouble(0.3).Approximate(1000000)); EXPECT_EQ(R(1, 5), R::FromDouble(1.0 / 5.0).Approximate(1000000)); EXPECT_EQ(R(22, 7), R::FromDouble(pi).Approximate(10)); EXPECT_EQ(R(311, 99), R::FromDouble(pi).Approximate(100)); EXPECT_EQ(R(355, 113), R::FromDouble(pi).Approximate(1000)); EXPECT_EQ(R(312689, 99532), R::FromDouble(pi).Approximate(100000)); } }
constexpr Rational(I value = 0) : n_(value), d_(1) {}
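Because this single-argument constructor is not explicit, any integer converts implicitly to a Rational with denominator 1, and a default-constructed value is exactly 0/1; this is what lets the tests below compare rationals directly against integer literals and copy-initialize from std::numeric_limits<int>::max():

// Copy-initialization flows through this converting constructor.
tensorstore::Rational<int> zero;       // 0/1
tensorstore::Rational<int> seven = 7;  // 7/1, implicit conversion
static_assert(tensorstore::Rational<int>(7).denominator() == 1);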
TEST(RationalTest, Initialization) {
  static constexpr Rational<int> r1, r2(0), r3(1), r4(-3), r5(7, 2),
      r6(5, 15), r7(14, -21), r8(-4, 6), r9(-14, -70);
  static_assert(r1.numerator() == 0);
  static_assert(r2.numerator() == 0);
  static_assert(r3.numerator() == 1);
  static_assert(r4.numerator() == -3);
  static_assert(r5.numerator() == 7);
  static_assert(r6.numerator() == 1);
  static_assert(r7.numerator() == -2);
  static_assert(r8.numerator() == -2);
  static_assert(r9.numerator() == 1);
  static_assert(r1.denominator() == 1);
  static_assert(r2.denominator() == 1);
  static_assert(r3.denominator() == 1);
  static_assert(r4.denominator() == 1);
  static_assert(r5.denominator() == 2);
  static_assert(r6.denominator() == 3);
  static_assert(r7.denominator() == 3);
  static_assert(r8.denominator() == 3);
  static_assert(r9.denominator() == 5);
  static_assert(Rational<int>(0, 0).is_nan());
  static_assert(Rational<int>(1, std::numeric_limits<int>::min()).is_nan());
  static_assert(!Rational<int>(1, -std::numeric_limits<int>::max()).is_nan());
}
#include "xla/service/cpu/shape_partition.h" #include <algorithm> #include <cmath> #include <cstdint> #include <utility> #include <vector> namespace xla { namespace cpu { std::vector<int64_t> ShapePartitionAssigner::Run( int64_t target_partition_count) { std::vector<int64_t> outer_dims; int64_t outer_dim_size = 1; for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) { const int64_t dimension = shape_.layout().minor_to_major(i); outer_dims.push_back(dimension); outer_dim_size *= shape_.dimensions(dimension); if (outer_dim_size >= target_partition_count) { break; } } target_partition_count = std::min(outer_dim_size, target_partition_count); const int64_t target_dim_partition_count = std::pow( static_cast<double>(target_partition_count), 1.0 / outer_dims.size()); std::vector<int64_t> dimension_partition_counts(outer_dims.size()); for (int64_t i = 0; i < outer_dims.size(); ++i) { dimension_partition_counts[i] = std::min(static_cast<int64_t>(shape_.dimensions(outer_dims[i])), target_dim_partition_count); } if (GetTotalPartitionCount(dimension_partition_counts) < target_partition_count) { for (int64_t i = 0; i < dimension_partition_counts.size(); ++i) { const int64_t current_dim_partition_count = dimension_partition_counts[i]; const int64_t other_dims_partition_count = GetTotalPartitionCount(dimension_partition_counts) / current_dim_partition_count; int64_t additional_partition_count = target_partition_count / other_dims_partition_count - current_dim_partition_count; additional_partition_count = std::min( shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i], additional_partition_count); if (additional_partition_count > 0) { dimension_partition_counts[i] += additional_partition_count; } } } return dimension_partition_counts; } int64_t ShapePartitionAssigner::GetTotalPartitionCount( const std::vector<int64_t>& dimension_partition_counts) { int64_t total_partition_count = 1; for (int64_t dim_partition_count : dimension_partition_counts) { total_partition_count *= dim_partition_count; } return total_partition_count; } ShapePartitionIterator::ShapePartitionIterator( const Shape& shape, absl::Span<const int64_t> dimension_partition_counts) : shape_(shape), dimension_partition_counts_(dimension_partition_counts.begin(), dimension_partition_counts.end()), dimensions_(dimension_partition_counts_.size()), dimension_partition_sizes_(dimension_partition_counts_.size()), dimension_partition_strides_(dimension_partition_counts_.size()) { for (int i = 0; i < dimensions_.size(); ++i) { dimensions_[i] = shape_.layout().minor_to_major( shape_.layout().minor_to_major_size() - 1 - i); } for (int i = 0; i < dimension_partition_sizes_.size(); ++i) { const int64_t dim_size = shape_.dimensions(dimensions_[i]); dimension_partition_sizes_[i] = std::max(int64_t{1}, dim_size / dimension_partition_counts_[i]); } dimension_partition_strides_[dimension_partition_strides_.size() - 1] = 1; for (int i = dimension_partition_strides_.size() - 2; i >= 0; --i) { dimension_partition_strides_[i] = dimension_partition_strides_[i + 1] * dimension_partition_counts_[i + 1]; } } std::vector<std::pair<int64_t, int64_t>> ShapePartitionIterator::GetPartition( int64_t index) const { std::vector<std::pair<int64_t, int64_t>> partition(dimensions_.size()); for (int64_t i = 0; i < partition.size(); ++i) { const int64_t partition_index = index / dimension_partition_strides_[i]; partition[i].first = partition_index * dimension_partition_sizes_[i]; if (partition_index == dimension_partition_counts_[i] - 1) { 
partition[i].second = shape_.dimensions(dimensions_[i]) - partition[i].first; } else { partition[i].second = dimension_partition_sizes_[i]; } CHECK_GT(partition[i].second, 0); index -= partition_index * dimension_partition_strides_[i]; } return partition; } int64_t ShapePartitionIterator::GetTotalPartitionCount() const { return ShapePartitionAssigner::GetTotalPartitionCount( dimension_partition_counts_); } } }
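Tracing ShapePartitionAssigner::Run above on a small case helps: for a {5, 3} shape and target_partition_count = 6, the even split gives floor(6^(1/2)) = 2 partitions per dimension ({2, 2}, only 4 partitions total), and the greedy top-up then grows dimension 0 to reach {3, 2} — exactly what the Shape53WithLayout10 test below expects at target 6. Standalone arithmetic only, no XLA dependencies:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> dims = {5, 3};  // outer dimensions, major-most first
  int64_t target = 6;
  int64_t per_dim = static_cast<int64_t>(
      std::pow(static_cast<double>(target), 1.0 / dims.size()));  // 2
  std::vector<int64_t> counts(dims.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    counts[i] = std::min(dims[i], per_dim);  // {2, 2}
  }
  for (size_t i = 0; i < counts.size(); ++i) {
    int64_t total = counts[0] * counts[1];
    int64_t others = total / counts[i];
    // Dim 0 can absorb target / others - counts[i] = 6 / 2 - 2 = 1 more.
    int64_t extra = std::min(dims[i] - counts[i], target / others - counts[i]);
    if (extra > 0) counts[i] += extra;
  }
  std::cout << counts[0] << ", " << counts[1] << "\n";  // 3, 2
  return 0;
}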
#include "xla/service/cpu/shape_partition.h" #include <algorithm> #include <random> #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" namespace xla { namespace cpu { namespace { class ShapePartitionAssignerTest : public HloTestBase { protected: typedef std::vector<int64_t> Vec; void RunR2Test(const Shape& shape, int64_t max_target_partition_count, const std::vector<int64_t>* expected_partitions) { ShapePartitionAssigner assigner(shape); for (int64_t i = 1; i <= max_target_partition_count; ++i) { std::vector<int64_t> actual_partitions = assigner.Run(i); EXPECT_THAT(actual_partitions, expected_partitions[i - 1]); } } }; TEST_F(ShapePartitionAssignerTest, Shape13WithLayout10) { std::vector<int64_t> expected_partitions[] = {{1} , {1, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 3}, {1, 0}), 2, expected_partitions); } TEST_F(ShapePartitionAssignerTest, Shape31WithLayout01) { std::vector<int64_t> expected_partitions[] = { {1} , {1, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 1}, {0, 1}), 2, expected_partitions); } TEST_F(ShapePartitionAssignerTest, Shape53WithLayout10) { std::vector<int64_t> expected_partitions[] = {{1} , {2} , {3} , {4} , {5} , {3, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}), 6, expected_partitions); } TEST_F(ShapePartitionAssignerTest, Shape53WithLayout01) { std::vector<int64_t> expected_partitions[] = { {1} , {2} , {3} , {2, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {0, 1}), 4, expected_partitions); } TEST_F(ShapePartitionAssignerTest, Shape532WithLayout210) { std::vector<int64_t> expected_partitions[] = { {1} , {2} , {3} , {4} , {5} , {3, 2} , {3, 2} , {4, 2} , {3, 3} , {3, 3} , {3, 3} , {4, 3} , {4, 3} , {4, 3} , {5, 3} , {4, 2, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}), 16, expected_partitions); } TEST_F(ShapePartitionAssignerTest, Shape532WithLayout201) { std::vector<int64_t> expected_partitions[] = { {1} , {2} , {3} , {2, 2} , {2, 2} , {3, 2} , {3, 2} , {3, 2} , {3, 3} , {3, 3} , {3, 3} , {3, 4} , {3, 4} , {3, 4} , {3, 5} , {3, 2, 2} }; RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1}), 16, expected_partitions); } class ShapePartitionIteratorTest : public HloTestBase { protected: typedef std::vector<std::pair<int64_t, int64_t>> Partition; }; TEST_F(ShapePartitionIteratorTest, Shape53WithLayout10) { Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}); { ShapePartitionIterator iterator(shape, {1}); EXPECT_EQ(1, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 5}}), iterator.GetPartition(0))); } { ShapePartitionIterator iterator(shape, {2}); EXPECT_EQ(2, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 2}}), iterator.GetPartition(0))); EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(1))); } { ShapePartitionIterator iterator(shape, {3}); EXPECT_EQ(3, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 1}}), iterator.GetPartition(0))); EXPECT_TRUE(absl::c_equal(Partition({{1, 1}}), iterator.GetPartition(1))); EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(2))); } } TEST_F(ShapePartitionIteratorTest, Shape532WithLayout210) { Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}); { ShapePartitionIterator iterator(shape, {1, 1}); EXPECT_EQ(1, iterator.GetTotalPartitionCount()); EXPECT_TRUE( absl::c_equal(Partition({{0, 5}, {0, 3}}), 
iterator.GetPartition(0))); } { ShapePartitionIterator iterator(shape, {2, 2}); EXPECT_EQ(4, iterator.GetTotalPartitionCount()); EXPECT_TRUE( absl::c_equal(Partition({{0, 2}, {0, 1}}), iterator.GetPartition(0))); EXPECT_TRUE( absl::c_equal(Partition({{0, 2}, {1, 2}}), iterator.GetPartition(1))); EXPECT_TRUE( absl::c_equal(Partition({{2, 3}, {0, 1}}), iterator.GetPartition(2))); EXPECT_TRUE( absl::c_equal(Partition({{2, 3}, {1, 2}}), iterator.GetPartition(3))); } } class RandomShapePartitionIteratorTest : public HloTestBase { protected: typedef std::vector<std::pair<int64_t, int64_t>> Partition; RandomShapePartitionIteratorTest() : generator_(rd_()), distribution_(1, 10) {} std::vector<int64_t> RandR4Dims() { return {Rand(), Rand(), Rand(), Rand()}; } int64_t Rand() { return distribution_(generator_); } std::random_device rd_; std::mt19937 generator_; std::uniform_int_distribution<int> distribution_; }; TEST_F(RandomShapePartitionIteratorTest, RandomShapeAndPartitions) { Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, RandR4Dims(), {3, 2, 1, 0}); const int num_outer_dims_to_partition = 1 + (Rand() % 3); std::vector<int64_t> dim_sizes(num_outer_dims_to_partition); std::vector<int64_t> dim_partition_counts(num_outer_dims_to_partition); int64_t total_dim_size = 1; for (int i = 0; i < num_outer_dims_to_partition; ++i) { const int64_t dimension = shape.layout().minor_to_major( shape.layout().minor_to_major_size() - 1 - i); dim_sizes[i] = shape.dimensions(dimension); total_dim_size *= dim_sizes[i]; const int64_t dim_partition_count = 1 + Rand() % dim_sizes[i]; dim_partition_counts[i] = dim_partition_count; } std::vector<std::map<int64_t, int64_t>> ranges(num_outer_dims_to_partition); ShapePartitionIterator partition_iterator(shape, dim_partition_counts); const int64_t partition_count = partition_iterator.GetTotalPartitionCount(); for (int64_t i = 0; i < partition_count; ++i) { const auto& dim_partition = partition_iterator.GetPartition(i); for (int dim = 0; dim < dim_partition.size(); ++dim) { ranges[dim].insert( std::make_pair(dim_partition[dim].first, dim_partition[dim].first + dim_partition[dim].second)); } } for (int i = 0; i < ranges.size(); ++i) { int64_t expected_index = 0; for (auto& r : ranges[i]) { EXPECT_EQ(expected_index, r.first); expected_index = r.second; } EXPECT_EQ(expected_index, dim_sizes[i]); } } } } }
std::vector<std::pair<int64_t, int64_t>> ShapePartitionIterator::GetPartition( int64_t index) const { std::vector<std::pair<int64_t, int64_t>> partition(dimensions_.size()); for (int64_t i = 0; i < partition.size(); ++i) { const int64_t partition_index = index / dimension_partition_strides_[i]; partition[i].first = partition_index * dimension_partition_sizes_[i]; if (partition_index == dimension_partition_counts_[i] - 1) { partition[i].second = shape_.dimensions(dimensions_[i]) - partition[i].first; } else { partition[i].second = dimension_partition_sizes_[i]; } CHECK_GT(partition[i].second, 0); index -= partition_index * dimension_partition_strides_[i]; } return partition; }
TEST_F(ShapePartitionIteratorTest, Shape53WithLayout10) { Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}); { ShapePartitionIterator iterator(shape, {1}); EXPECT_EQ(1, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 5}}), iterator.GetPartition(0))); } { ShapePartitionIterator iterator(shape, {2}); EXPECT_EQ(2, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 2}}), iterator.GetPartition(0))); EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(1))); } { ShapePartitionIterator iterator(shape, {3}); EXPECT_EQ(3, iterator.GetTotalPartitionCount()); EXPECT_TRUE(absl::c_equal(Partition({{0, 1}}), iterator.GetPartition(0))); EXPECT_TRUE(absl::c_equal(Partition({{1, 1}}), iterator.GetPartition(1))); EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(2))); } } TEST_F(ShapePartitionIteratorTest, Shape532WithLayout210) { Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}); { ShapePartitionIterator iterator(shape, {1, 1}); EXPECT_EQ(1, iterator.GetTotalPartitionCount()); EXPECT_TRUE( absl::c_equal(Partition({{0, 5}, {0, 3}}), iterator.GetPartition(0))); } { ShapePartitionIterator iterator(shape, {2, 2}); EXPECT_EQ(4, iterator.GetTotalPartitionCount()); EXPECT_TRUE( absl::c_equal(Partition({{0, 2}, {0, 1}}), iterator.GetPartition(0))); EXPECT_TRUE( absl::c_equal(Partition({{0, 2}, {1, 2}}), iterator.GetPartition(1))); EXPECT_TRUE( absl::c_equal(Partition({{2, 3}, {0, 1}}), iterator.GetPartition(2))); EXPECT_TRUE( absl::c_equal(Partition({{2, 3}, {1, 2}}), iterator.GetPartition(3))); } }
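The bounds in Shape53WithLayout10 above follow from the iterator's size/remainder rule in GetPartition: every partition spans max(1, dim_size / partition_count) elements and the last partition absorbs the remainder. A one-dimensional sketch:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  int64_t dim_size = 5, partitions = 3;
  int64_t size = std::max(int64_t{1}, dim_size / partitions);  // 1
  for (int64_t p = 0; p < partitions; ++p) {
    int64_t start = p * size;
    int64_t extent = (p == partitions - 1) ? dim_size - start : size;
    std::cout << "{" << start << ", " << extent << "} ";  // {0, 1} {1, 1} {2, 3}
  }
  std::cout << "\n";
  return 0;
}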
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h" #include <functional> #include <optional> #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/types/optional.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/Quant/QuantTypes.h" #include "mlir/IR/Block.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/Operation.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/Types.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_quantize_op_utils.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { namespace { constexpr StringRef kDequantizeFunctionName = "composite_dequantize"; constexpr StringRef kUniformQuantizationFunctionName = "uniform"; func::FuncOp PrepareFunctionRegister(PatternRewriter& rewriter, Value input_val, ShapedType result_type, StringRef func_name, Value& func_input_arg) { Operation* input_op = input_val.getDefiningOp(); Operation* insertion_point = input_op->getParentOfType<func::FuncOp>(); if (!insertion_point) insertion_point = input_op->getParentOfType<ModuleOp>(); rewriter.setInsertionPointAfter(insertion_point); UnrankedTensorType create_unknown_input_shape = CreateUnknownShapeFromElementType(input_val.getType()); UnrankedTensorType create_unknown_output_shape = CreateUnknownShapeFromElementType(result_type); FunctionType func_type = FunctionType::get(rewriter.getContext(), {create_unknown_input_shape}, {create_unknown_output_shape}); func::FuncOp quantization_func = rewriter.create<func::FuncOp>(input_op->getLoc(), func_name, func_type); OpBuilder::InsertionGuard guard = OpBuilder::InsertionGuard(rewriter); ArrayRef<Type> inputs = quantization_func.getFunctionType().getInputs(); Block* block = rewriter.createBlock( &quantization_func.getBody(), quantization_func.begin(), inputs, SmallVector<Location>(inputs.size(), quantization_func.getLoc())); func_input_arg = block->getArgument(0); return quantization_func; } TF::PartitionedCallOp FinalizeFunctionRegister( PatternRewriter& rewriter, Value input, Value output, func::FuncOp& quantization_func, Operation* quantized_op, StringRef func_name, IRRewriter::InsertPoint original_point, Type quantize_result_type) { rewriter.create<func::ReturnOp>(input.getLoc(), ArrayRef<Value>({output})); quantization_func.setVisibility(func::FuncOp::Visibility::Private); SymbolTable symbol_table(quantized_op->getParentOfType<ModuleOp>()); symbol_table.insert(quantization_func); FlatSymbolRefAttr func_name_attr = FlatSymbolRefAttr::get(rewriter.getStringAttr(func_name)); rewriter.restoreInsertionPoint(original_point); auto quantize_call = rewriter.create<TF::PartitionedCallOp>( quantized_op->getLoc(), quantize_result_type, input, func_name_attr, "", "", ""); return quantize_call; } std::optional<TF::PartitionedCallOp> RegisterOperationsInFuncOp( StringRef func_name, PatternRewriter& rewriter, QuantizedType quant_type, Value input_val, ShapedType result_type, std::function<Operation*(PatternRewriter&, Operation*, Value, ShapedType, QuantizedType)> 
quantization_operations_func) { Operation* input_op = input_val.getDefiningOp(); auto original_point = rewriter.saveInsertionPoint(); auto unique_func_name = func_name.str(); SymbolTable symbol_table(input_op->getParentOfType<ModuleOp>()); while (symbol_table.lookup(unique_func_name)) { absl::StrAppend(&unique_func_name, "_"); } Value func_input_arg; func::FuncOp func_op = PrepareFunctionRegister( rewriter, input_val, result_type, unique_func_name, func_input_arg); Operation* last_op_in_func = quantization_operations_func(rewriter, func_op.getOperation(), func_input_arg, result_type, quant_type); auto end_call_op = FinalizeFunctionRegister( rewriter, input_val, last_op_in_func->getResult(0), func_op, input_op, unique_func_name, original_point, result_type); return end_call_op; } QuantizedType CalculateUniformQuantParams( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec) { const bool kIsNarrowRange = true; const bool kIsSigned = true; const int kBitWidth = 8; DenseFPElementsAttr attr; if (!matchPattern(op->getResult(0), m_Constant(&attr))) return nullptr; QuantizedType quant_type = mlir::dyn_cast<quant::QuantizedType>( quant::GetUniformQuantizedTypeForWeight( attr, kIsNarrowRange && kIsSigned, kBitWidth, kIsSigned, kIsNarrowRange, false)); return quant_type; } std::optional<Value> AddUniformQuantizeOps(PatternRewriter& rewriter, TF::ConstOp op, QuantizedType quant_type) { DenseFPElementsAttr attr; if (!matchPattern(op->getResult(0), m_Constant(&attr))) { return nullptr; } Type expressed_type = op.getResult().getType(); Type quantized_type = quant_type.castFromExpressedType(expressed_type); ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type); DenseElementsAttr tensor_proto_attr = mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type)); if (!tensor_proto_attr) { return nullptr; } Type storage_type = mlir::cast<QuantizedType>(shaped_quantized_type.getElementType()) .getStorageType(); ShapedType new_type = shaped_quantized_type.clone(storage_type); rewriter.setInsertionPointAfter(op); auto const_op = rewriter.create<TF::ConstOp>(op.getLoc(), new_type, tensor_proto_attr); auto new_identity_op = rewriter.create<TF::IdentityOp>( op->getLoc(), const_op.getType(), const_op); return new_identity_op.getResult(); } Operation* LogicsForUniformDequanization(PatternRewriter& rewriter, Operation* func_op, Value input_val, ShapedType original_input_tensor_type, QuantizedType quant_type) { auto loc = input_val.getLoc(); rewriter.setInsertionPointToStart( &(cast<func::FuncOp>(func_op)).getBody().front()); UnrankedTensorType create_unknown_input_shape = CreateUnknownShapeFromElementType(original_input_tensor_type); auto new_cast_op = rewriter.create<TF::CastOp>(loc, create_unknown_input_shape, input_val); auto qtype = mlir::dyn_cast<UniformQuantizedType>(quant_type); TensorType scale_type = RankedTensorType::get({}, rewriter.getF32Type()); Value scale_op = rewriter.create<TF::ConstOp>( loc, scale_type, DenseFPElementsAttr::get(scale_type, {static_cast<float>(qtype.getScale())})); if (original_input_tensor_type.getElementType().isBF16()) { scale_op = rewriter.create<TF::CastOp>( loc, UnrankedTensorType::get(rewriter.getBF16Type()), scale_op); } auto mul_op = rewriter.create<TF::MulOp>(loc, new_cast_op.getType(), scale_op, new_cast_op); return mul_op; } std::optional<TF::PartitionedCallOp> AddUniformDequantizeOps( PatternRewriter& rewriter, QuantizedType quant_type, Value val_to_dequantize, ShapedType result_type) { 
auto func_name = absl::StrJoin( {kDequantizeFunctionName, kUniformQuantizationFunctionName}, "_"); std::optional<TF::PartitionedCallOp> dequant_op = RegisterOperationsInFuncOp( func_name, rewriter, quant_type, val_to_dequantize, result_type, LogicsForUniformDequanization); return dequant_op; } } std::optional<TF::PartitionedCallOp> ApplyUniformQuantization( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec) { QuantizedType quant_type = CalculateUniformQuantParams(rewriter, op, weight_spec); if (!quant_type) return nullptr; std::optional<Value> quantized_val = AddUniformQuantizeOps(rewriter, op, quant_type); if (!quantized_val.has_value()) return std::nullopt; std::optional<TF::PartitionedCallOp> dequantized_val = AddUniformDequantizeOps(rewriter, quant_type, quantized_val.value(), mlir::cast<ShapedType>(op.getType())); return dequantized_val; } } }
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h" #include <optional> #include <gtest/gtest.h> #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/Quant/QuantOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/Value.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir::quant { namespace { using QuantizationComponentSpec = tensorflow::quantization::QuantizationComponentSpec; class EmptyPatternRewriter : public mlir::PatternRewriter { public: explicit EmptyPatternRewriter(const OpBuilder& other_builder) : mlir::PatternRewriter(other_builder) {} ~EmptyPatternRewriter() override = default; }; TEST(TfQuantOpTest, applyUniformQuantization) { MLIRContext context; OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context))); OpBuilder builder(&module->getBodyRegion()); context.loadDialect<TF::TensorFlowDialect, quant::QuantizationDialect, func::FuncDialect>(); EmptyPatternRewriter pattern_rewriter(builder); Value value = CreateConstValue<float>(builder, module->getLoc(), {1024, 2}, SmallVector<float>(2048, 0)); QuantizationComponentSpec quant_spec; quant_spec.set_quantization_component( QuantizationComponentSpec::COMPONENT_WEIGHT); quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8); std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization( pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec); EXPECT_TRUE(dequantize_op.has_value()); EXPECT_EQ(dequantize_op.value().func().getName().str(), "composite_dequantize_uniform"); } } }
std::optional<TF::PartitionedCallOp> ApplyUniformQuantization( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec) { QuantizedType quant_type = CalculateUniformQuantParams(rewriter, op, weight_spec); if (!quant_type) return nullptr; std::optional<Value> quantized_val = AddUniformQuantizeOps(rewriter, op, quant_type); if (!quantized_val.has_value()) return std::nullopt; std::optional<TF::PartitionedCallOp> dequantized_val = AddUniformDequantizeOps(rewriter, quant_type, quantized_val.value(), mlir::cast<ShapedType>(op.getType())); return dequantized_val; }
TEST(TfQuantOpTest, applyUniformQuantization) { MLIRContext context; OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context))); OpBuilder builder(&module->getBodyRegion()); context.loadDialect<TF::TensorFlowDialect, quant::QuantizationDialect, func::FuncDialect>(); EmptyPatternRewriter pattern_rewriter(builder); Value value = CreateConstValue<float>(builder, module->getLoc(), {1024, 2}, SmallVector<float>(2048, 0)); QuantizationComponentSpec quant_spec; quant_spec.set_quantization_component( QuantizationComponentSpec::COMPONENT_WEIGHT); quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8); std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization( pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec); EXPECT_TRUE(dequantize_op.has_value()); EXPECT_EQ(dequantize_op.value().func().getName().str(), "composite_dequantize_uniform"); }
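At runtime the generated composite_dequantize_uniform function reduces to the symmetric dequantization arithmetic that LogicsForUniformDequanization emits as a Cast followed by a Mul: real ≈ scale * q. A minimal numeric sketch with hypothetical values:

#include <cstdint>
#include <iostream>

int main() {
  float scale = 0.5f;  // hypothetical per-tensor scale
  int8_t q = -3;       // a quantized weight value
  float real = scale * static_cast<float>(q);  // Cast, then Mul by scale
  std::cout << real << "\n";  // -1.5
  return 0;
}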
#include "arolla/memory/frame.h" #include <algorithm> #include <cstddef> #include <cstring> #include <tuple> #include <typeindex> #include <typeinfo> #include <utility> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "arolla/util/algorithms.h" #include "arolla/util/memory.h" namespace arolla { std::type_index FrameLayout::FieldFactory::type_index() const { return type_; } void FrameLayout::FieldFactory::Add(size_t offset) { offsets_.push_back(offset); } void FrameLayout::FieldFactory::AddDerived( const FieldFactory& derived_factory) { DCHECK(type_index() == derived_factory.type_index()); for (size_t cur_offset : derived_factory.offsets_) { offsets_.push_back(cur_offset); } } FrameLayout::FieldFactory FrameLayout::FieldFactory::Derive( size_t offset) const { FieldFactory res = *this; for (size_t& cur_offset : res.offsets_) { cur_offset += offset; } return res; } void FrameLayout::FieldInitializers::AddOffsetToFactory( size_t offset, FieldFactory empty_factory) { auto it = type2factory.find(empty_factory.type_index()); if (it == type2factory.end()) { bool inserted; std::tie(it, inserted) = type2factory.emplace(empty_factory.type_index(), factories.size()); factories.push_back(std::move(empty_factory)); } DCHECK_LT(it->second, factories.size()); if (it->second < factories.size()) { factories[it->second].Add(offset); } } void FrameLayout::FieldInitializers::AddDerived( size_t offset, const FieldInitializers& derived_initializers) { for (const auto& [derived_tpe, derived_id] : derived_initializers.type2factory) { const auto& derived_factory = derived_initializers.factories[derived_id]; if (auto it = type2factory.find(derived_tpe); it != type2factory.end()) { factories[it->second].AddDerived(derived_factory.Derive(offset)); } else { type2factory.emplace(derived_tpe, factories.size()); factories.push_back(derived_factory.Derive(offset)); } } } FrameLayout::Slot<void> FrameLayout::Builder::AddSubFrame( const FrameLayout& subframe) { alloc_size_ = RoundUp(alloc_size_, subframe.AllocAlignment().value); size_t offset = alloc_size_; alloc_size_ += subframe.AllocSize(); alloc_alignment_ = std::max(alloc_alignment_, subframe.AllocAlignment().value); initializers_.AddDerived(offset, subframe.initializers_); #ifndef NDEBUG for (const auto& [field_offset, field_type] : subframe.registered_fields_) { registered_fields_.emplace(offset + field_offset, field_type); } #endif return FrameLayout::Slot<void>(offset); } absl::Status FrameLayout::Builder::RegisterUnsafeSlot( size_t byte_offset, size_t byte_size, const std::type_info& type) { return RegisterSlot(byte_offset, byte_size, type); } absl::Status FrameLayout::Builder::RegisterSlot(size_t byte_offset, size_t byte_size, const std::type_info& type, bool allow_duplicates) { if (byte_offset == FrameLayout::Slot<float>::kUninitializedOffset) { return absl::FailedPreconditionError( "unable to register uninitialized slot"); } if (byte_offset > alloc_size_ || byte_size > alloc_size_ - byte_offset) { return absl::FailedPreconditionError(absl::StrCat( "unable to register slot after the end of alloc, offset: ", byte_offset, ", size: ", byte_size, ", alloc size: ", alloc_size_)); } #ifndef NDEBUG if (!registered_fields_.emplace(byte_offset, std::type_index(type)).second && !allow_duplicates) { return absl::FailedPreconditionError(absl::StrCat( "slot is already registered ", byte_offset, " ", type.name())); } #endif return absl::OkStatus(); } }
#include "arolla/memory/frame.h" #include <array> #include <cstddef> #include <cstdint> #include <memory> #include <sstream> #include <string> #include <type_traits> #include <utility> #include <variant> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/dynamic_annotations.h" #include "absl/status/status.h" #include "arolla/memory/memory_allocation.h" #include "arolla/util/demangle.h" #include "arolla/util/is_bzero_constructible.h" #include "arolla/util/memory.h" #include "arolla/util/testing/status_matchers_backport.h" namespace arolla::testing { namespace { using ::arolla::testing::IsOk; using ::arolla::testing::StatusIs; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::HasSubstr; using ::testing::IsEmpty; struct SimpleStruct { int a; float b; }; struct InitializedStruct { int a = 1; float b = 2.0; }; TEST(FrameLayoutTest, SlotOutput) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int>(); std::ostringstream ss; ss << slot; EXPECT_EQ(ss.str(), std::string("Slot<") + TypeName<int>() + ">(0)"); } TEST(FrameLayoutTest, SimpleFields) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int>(); auto slot2 = builder.AddSlot<float>(); auto slot3 = builder.AddSlot<double>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(0)); EXPECT_THAT(frame.Get(slot2), Eq(0.0f)); EXPECT_THAT(frame.Get(slot3), Eq(0.0)); frame.Set(slot1, 1); frame.Set(slot2, 2.0f); frame.Set(slot3, M_PI); EXPECT_THAT(frame.Get(slot1), Eq(1)); EXPECT_THAT(frame.Get(slot2), Eq(2.0f)); EXPECT_THAT(frame.Get(slot3), Eq(M_PI)); } TEST(FrameLayoutTest, SimpleArrays) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::array<int, 4>>(); auto slot2 = builder.AddSlot<std::array<float, 4>>(); auto slot3 = builder.AddSlot<std::array<char, 4>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), ElementsAre(0, 0, 0, 0)); EXPECT_THAT(frame.Get(slot2), ElementsAre(0.0f, 0.0f, 0.0f, 0.0f)); EXPECT_THAT(frame.Get(slot3), ElementsAre(0, 0, 0, 0)); frame.Set(slot1, std::array<int, 4>{1, 2, 3, 4}); frame.Set(slot2, std::array<float, 4>{1.0f, 2.0f, 3.0f, 4.0f}); frame.Set(slot3, std::array<char, 4>{'a', 'b', 'c', 'd'}); EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3, 4)); EXPECT_THAT(frame.Get(slot2), ElementsAre(1.0f, 2.0f, 3.0f, 4.0f)); EXPECT_THAT(frame.Get(slot3), ElementsAre('a', 'b', 'c', 'd')); } TEST(FrameLayoutTest, SimplePointers) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int*>(); auto slot2 = builder.AddSlot<char*>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(nullptr)); EXPECT_THAT(frame.Get(slot2), Eq(nullptr)); int int_values[] = {1, 2, 3, 4}; char text[] = "It was a dark and stormy night."; frame.Set(slot1, int_values); frame.Set(slot2, text); EXPECT_THAT(frame.Get(slot1), Eq(int_values)); EXPECT_THAT(frame.Get(slot2), Eq(text)); } TEST(FrameLayoutTest, SmartPointers) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::unique_ptr<int>>(); auto slot2 = builder.AddSlot<std::unique_ptr<std::string>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(nullptr)); EXPECT_THAT(frame.Get(slot2), Eq(nullptr)); frame.Set(slot1, std::make_unique<int>(12)); 
frame.Set(slot2, std::make_unique<std::string>("It was a dark and stormy night.")); EXPECT_THAT(*frame.Get(slot1), Eq(12)); EXPECT_THAT(*frame.Get(slot2), Eq("It was a dark and stormy night.")); } TEST(FrameLayoutTest, Vector) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::vector<int>>(); auto slot2 = builder.AddSlot<std::vector<std::string>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), IsEmpty()); EXPECT_THAT(frame.Get(slot2), IsEmpty()); auto* int_vector = frame.GetMutable(slot1); int_vector->push_back(1); int_vector->push_back(2); int_vector->push_back(3); auto* string_vector = frame.GetMutable(slot2); string_vector->push_back("How"); string_vector->push_back("now"); string_vector->push_back("brown"); string_vector->push_back("cow?"); EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3)); EXPECT_THAT(frame.Get(slot2), ElementsAre("How", "now", "brown", "cow?")); } TEST(FrameLayoutTest, Structs) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<SimpleStruct>(); auto slot2 = builder.AddSlot<InitializedStruct>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); const SimpleStruct& s1 = frame.Get(slot1); EXPECT_THAT(s1.a, Eq(0)); EXPECT_THAT(s1.b, Eq(0.0f)); const InitializedStruct& s2 = frame.Get(slot2); EXPECT_THAT(s2.a, Eq(1)); EXPECT_THAT(s2.b, Eq(2.0f)); } TEST(FrameLayoutTest, AFewDifferentTypesWellInitialized) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::vector<int>>(); auto slot2 = builder.AddSlot<std::vector<std::string>>(); auto slot3 = builder.AddSlot<std::vector<int>>(); auto slot4 = builder.AddSlot<SimpleStruct>(); auto slot5 = builder.AddSlot<InitializedStruct>(); auto slot6 = builder.AddSlot<std::vector<int>>(); auto slot7 = builder.AddSlot<std::vector<std::string>>(); auto slot8 = builder.AddSlot<std::vector<double>>(); auto slot9 = builder.AddSlot<InitializedStruct>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), IsEmpty()); EXPECT_THAT(frame.Get(slot2), IsEmpty()); EXPECT_THAT(frame.Get(slot3), IsEmpty()); EXPECT_THAT(frame.Get(slot6), IsEmpty()); EXPECT_THAT(frame.Get(slot7), IsEmpty()); EXPECT_THAT(frame.Get(slot8), IsEmpty()); const SimpleStruct& simple = frame.Get(slot4); EXPECT_THAT(simple.a, Eq(0)); EXPECT_THAT(simple.b, Eq(0.0f)); for (const InitializedStruct& init : {frame.Get(slot5), frame.Get(slot9)}) { EXPECT_THAT(init.a, Eq(1)); EXPECT_THAT(init.b, Eq(2.0f)); } } TEST(FrameLayoutTest, HasField) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int>(); auto slot2 = builder.AddSlot<std::vector<int>>(); auto slot3 = builder.AddSlot<SimpleStruct>(); auto slot4 = builder.AddSlot<std::array<SimpleStruct, 4>>(); auto slot5 = builder.AddSlot<InitializedStruct>(); auto slot6 = builder.AddSlot<std::array<InitializedStruct, 4>>(); auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(slot1.byte_offset(), typeid(int))); EXPECT_TRUE(layout.HasField(slot2.byte_offset(), typeid(std::vector<int>))); EXPECT_TRUE(layout.HasField(slot3.byte_offset(), typeid(SimpleStruct))); EXPECT_TRUE(layout.HasField(slot4.byte_offset(), typeid(std::array<SimpleStruct, 4>))); EXPECT_TRUE(layout.HasField(slot5.byte_offset(), typeid(InitializedStruct))); EXPECT_TRUE(layout.HasField(slot6.byte_offset(), typeid(std::array<InitializedStruct, 4>))); } TEST(FrameLayoutTest, 
RegisterUnsafeSlotWithEmptyField) { FrameLayout::Builder builder; ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok()); auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(0, typeid(std::monostate()))); } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_2part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 2); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t), typeid(int8_t)), IsOk()); #ifndef NDEBUG EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("slot is already registered"))); EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true), IsOk()); #endif auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t))); #ifndef NDEBUG EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float))); EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t))); #endif } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_after_end = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 4); auto uninitialized_slot = FrameLayout::Slot<int16_t>::UnsafeUninitializedSlot(); auto status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_OK(status); #ifndef NDEBUG status = builder.RegisterUnsafeSlot(slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); #endif status = builder.RegisterUnsafeSlot(slot_after_end); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc")); status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int)); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc, " "offset: 100, size: 4, alloc size: 4")); status = builder.RegisterUnsafeSlot(uninitialized_slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register uninitialized slot")); } struct SelfReference { const SelfReference* self; SelfReference() : self(this) {} SelfReference(const SelfReference&) = delete; SelfReference& operator=(const SelfReference&) = delete; ~SelfReference() { self = nullptr; } }; TEST(FrameLayoutTest, AddSubFrame) { FrameLayout subframe_layout; std::vector<FrameLayout::Slot<SelfReference>> field_slots; { FrameLayout::Builder builder; for 
(int i = 0; i < 2; ++i) { field_slots.push_back(builder.AddSlot<SelfReference>()); } subframe_layout = std::move(builder).Build(); } FrameLayout frame_layout; std::vector<FrameLayout::Slot<void>> subframe_slots; { FrameLayout::Builder builder; builder.AddSlot<float>(); for (int j = 0; j < 3; ++j) { subframe_slots.push_back(builder.AddSubFrame(subframe_layout)); builder.AddSlot<double>(); } frame_layout = std::move(builder).Build(); } for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { EXPECT_TRUE(frame_layout.HasField( subframe_slot.byte_offset() + field_slot.byte_offset(), typeid(SelfReference))); } } const auto alloc = AlignedAlloc(frame_layout.AllocAlignment(), frame_layout.AllocSize()); frame_layout.InitializeAlignedAlloc(alloc.get()); FramePtr frame(alloc.get(), &frame_layout); for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { const void* subframe_ptr = frame.GetRawPointer(subframe_slot.byte_offset()); ConstFramePtr subframe(subframe_ptr, &subframe_layout); const SelfReference& field = subframe.Get(field_slot); EXPECT_TRUE(field.self == &field); } } frame_layout.DestroyAlloc(alloc.get()); ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(alloc.get(), frame_layout.AllocSize()); for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { const void* subframe_ptr = frame.GetRawPointer(subframe_slot.byte_offset()); ConstFramePtr subframe(subframe_ptr, &subframe_layout); const SelfReference& field = subframe.Get(field_slot); EXPECT_TRUE(field.self == nullptr); } } } TEST(FrameLayoutTest, AddSubFrameAllocAlignment) { FrameLayout::Builder builder; builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>()); builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>()); auto frame_layout = std::move(builder).Build(); EXPECT_EQ(frame_layout.AllocSize(), 32); EXPECT_EQ(frame_layout.AllocAlignment().value, 16); } TEST(FrameLayoutTest, ArrayCompatibility) { FrameLayout::Builder builder; builder.AddSlot<std::aligned_storage_t<16, 16>>(); builder.AddSlot<std::aligned_storage_t<1, 1>>(); auto frame_layout = std::move(builder).Build(); EXPECT_EQ(frame_layout.AllocSize(), 32); EXPECT_EQ(frame_layout.AllocAlignment().value, 16); } TEST(FrameLayoutTest, InitDestroyAllocN) { static int instance_counter = 0; struct InstanceCounted { InstanceCounted() { ++instance_counter; } ~InstanceCounted() { --instance_counter; } }; struct SelfReferenced { SelfReferenced() : self(this) {} SelfReferenced* self; }; FrameLayout::Builder builder; auto int_slot = builder.AddSlot<int>(); auto self_ref_slot = builder.AddSlot<SelfReferenced>(); builder.AddSlot<InstanceCounted>(); auto layout = std::move(builder).Build(); const int n = 10; const auto alloc = AlignedAlloc(layout.AllocAlignment(), layout.AllocSize() * n); layout.InitializeAlignedAllocN(alloc.get(), n); EXPECT_EQ(instance_counter, n); for (int i = 0; i < n; ++i) { ConstFramePtr ith_frame( static_cast<const std::byte*>(alloc.get()) + i * layout.AllocSize(), &layout); EXPECT_EQ(ith_frame.Get(int_slot), 0); EXPECT_EQ(ith_frame.Get(self_ref_slot).self, &ith_frame.Get(self_ref_slot)); } layout.DestroyAllocN(alloc.get(), n); EXPECT_EQ(instance_counter, 0); } struct IsBZeroConstructible { static bool ctor_called; static bool dtor_called; IsBZeroConstructible() { ctor_called = true; } ~IsBZeroConstructible() { dtor_called = true; } }; bool IsBZeroConstructible::ctor_called; bool IsBZeroConstructible::dtor_called; } } namespace arolla { template 
<> struct is_bzero_constructible<::arolla::testing::IsBZeroConstructible> : std::true_type {}; } namespace arolla::testing { namespace { TEST(FrameLayoutTest, IsBZeroConstructibleHandling) { ASSERT_FALSE(IsBZeroConstructible::ctor_called); ASSERT_FALSE(IsBZeroConstructible::dtor_called); { auto layout = MakeTypeLayout<IsBZeroConstructible>(); MemoryAllocation alloc(&layout); } EXPECT_FALSE(IsBZeroConstructible::ctor_called); EXPECT_TRUE(IsBZeroConstructible::dtor_called); } } }
absl::Status FrameLayout::Builder::RegisterUnsafeSlot( size_t byte_offset, size_t byte_size, const std::type_info& type) { return RegisterSlot(byte_offset, byte_size, type); }
TEST(FrameLayoutTest, RegisterUnsafeSlotWithEmptyField) { FrameLayout::Builder builder; ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok()); auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(0, typeid(std::monostate()))); } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_2part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 2); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t), typeid(int8_t)), IsOk()); #ifndef NDEBUG EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("slot is already registered"))); EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true), IsOk()); #endif auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t))); #ifndef NDEBUG EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float))); EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t))); #endif } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_after_end = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 4); auto uninitialized_slot = FrameLayout::Slot<int16_t>::UnsafeUninitializedSlot(); auto status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_OK(status); #ifndef NDEBUG status = builder.RegisterUnsafeSlot(slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); #endif status = builder.RegisterUnsafeSlot(slot_after_end); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc")); status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int)); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc, " "offset: 100, size: 4, alloc size: 4")); status = builder.RegisterUnsafeSlot(uninitialized_slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register uninitialized slot")); }
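The "after the end of alloc" failures above come from a single range check in RegisterSlot. Note the overflow-safe form: because byte_offset > alloc_size is tested first, the subtraction alloc_size - byte_offset can never wrap. Standalone arithmetic for the failing case exercised above (offset 100, size 4, alloc size 4):

#include <cstddef>
#include <iostream>

int main() {
  size_t alloc_size = 4, byte_offset = 100, byte_size = 4;
  bool out_of_range =
      byte_offset > alloc_size || byte_size > alloc_size - byte_offset;
  std::cout << (out_of_range ? "rejected" : "ok") << "\n";  // rejected
  return 0;
}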
#include "xla/tools/xla_compile_lib.h" #include <cmath> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "google/protobuf/duration.pb.h" #include "absl/cleanup/cleanup.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/Parser/Parser.h" #include "stablehlo/dialect/Register.h" #include "xla/client/xla_computation.h" #include "xla/debug_options_flags.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/pjrt/mlir_to_hlo.h" #include "xla/service/compiler.h" #include "xla/service/cpu/cpu_compiler.h" #include "xla/service/cpu/cpu_executable.h" #include "xla/service/executable.h" #include "xla/service/export_hlo.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_module_config.h" #include "xla/service/symbol_repository.h" #include "xla/service/xla_compile_result.pb.h" #include "xla/shape.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tools/hlo_module_loader.h" #include "xla/util.h" #include "tsl/platform/env.h" #include "tsl/platform/env_time.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/status.h" #include "tsl/platform/status_to_from_proto.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "xla/service/gpu/autotuner_util.h" #include "xla/service/gpu/executable.pb.h" #include "xla/service/gpu/gpu_symbol_repository.h" #include "xla/stream_executor/gpu/gpu_init.h" #endif #if GOOGLE_CUDA #include "xla/service/gpu/nvptx_compiler.h" #elif TENSORFLOW_USE_ROCM #include "xla/service/gpu/amdgpu_compiler.h" #endif namespace xla { static absl::StatusOr<std::string> AotCompileCpuExecutable( std::unique_ptr<HloModule> hlo_module) { cpu::CpuCompiler cpu_compiler; auto module_group = std::make_unique<HloModuleGroup>(std::move(hlo_module)); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<Executable>> executables, cpu_compiler.Compile(std::move(module_group), {{nullptr}}, {nullptr})); TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result, cpu_compiler.Export(executables[0].get())); return aot_result->SerializeAsString(); } static absl::StatusOr<std::string> CompileGpuExecutable( std::unique_ptr<HloModule> hlo_module, std::optional<Compiler::TargetConfig> target_config, CompilationResult& result) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM const bool aot = target_config.has_value(); #if GOOGLE_CUDA auto gpu_compiler = gpu::NVPTXCompiler(); #elif TENSORFLOW_USE_ROCM auto gpu_compiler = gpu::AMDGPUCompiler(); #endif auto module_group = std::make_unique<HloModuleGroup>(std::move(hlo_module)); if (aot) { AotCompilationOptions aot_options(gpu_compiler.PlatformId()); aot_options.set_target_config(*target_config); aot_options.set_run_backend_only(true); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<AotCompilationResult>> aot_results, gpu_compiler.CompileAheadOfTime(std::move(module_group), aot_options)); TF_ASSIGN_OR_RETURN(std::string compile_result, 
aot_results[0]->SerializeAsString()); *result.mutable_hlo_module() = aot_results[0]->optimized_module()->ToProto(); return compile_result; } Compiler::CompileOptions compile_options; TF_RETURN_IF_ERROR(stream_executor::ValidateGPUMachineManager()); TF_ASSIGN_OR_RETURN( stream_executor::StreamExecutor * stream_executor, stream_executor::GPUMachineManager()->ExecutorForDevice(0)); auto allocator = std::make_unique<stream_executor::StreamExecutorMemoryAllocator>( stream_executor); compile_options.device_allocator = allocator.get(); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<Executable>> executables, gpu_compiler.Compile(std::move(module_group), {{stream_executor}}, compile_options)); *result.mutable_hlo_module() = executables[0]->module().ToProto(); return executables[0]->module().ToString(); #else LOG(ERROR) << "Neither ROCm nor CUDA present; returning empty."; return ""; #endif } absl::StatusOr<std::string> CompileExecutable( std::unique_ptr<HloModule> hlo_module, BackendType backend, std::optional<Compiler::TargetConfig> target_config, CompilationResult& result) { if (backend == BackendType::kCpu) { return AotCompileCpuExecutable(std::move(hlo_module)); } return CompileGpuExecutable(std::move(hlo_module), std::move(target_config), result); } absl::Status WriteResultFile(const absl::string_view result_output_file, TimerStats& stats, CompilationResult& compilation_result) { if (result_output_file.empty()) { return absl::OkStatus(); } absl::MutexLock ml(&stats.stats_mutex); const double secs = std::floor(stats.cumulative_secs); const double nanos = (stats.cumulative_secs - secs) * tsl::EnvTime::kSecondsToNanos; google::protobuf::Duration duration; duration.set_seconds(secs); duration.set_nanos(nanos); *compilation_result.mutable_perf_stats()->mutable_compilation_duration() = duration; *compilation_result.mutable_perf_stats()->mutable_total_duration() = duration; return tsl::WriteBinaryProto( tsl::Env::Default(), std::string(result_output_file), compilation_result); } absl::StatusOr<std::unique_ptr<HloModule>> LoadModule( const absl::string_view module_path) { auto format = std::string(tsl::io::Extension(module_path)); if (format == "hlo" || format == "txt" || format == "pb") { return LoadModuleFromFile( std::string(module_path), format, hlo_module_loader_details::Config(), [&](HloModuleConfig* c) {}, nullptr); } std::string module_string; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), std::string(module_path), &module_string)); mlir::DialectRegistry dialects; dialects.insert<mlir::arith::ArithDialect>(); dialects.insert<mlir::mhlo::MhloDialect>(); dialects.insert<mlir::func::FuncDialect>(); mlir::stablehlo::registerAllDialects(dialects); auto threading = mlir::MLIRContext::Threading::DISABLED; auto ctx = std::make_unique<mlir::MLIRContext>(dialects, threading); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(module_string, ctx.get()); XlaComputation xla_computation; TF_RETURN_IF_ERROR( MlirToXlaComputation(*module, xla_computation, false, false)); HloModuleProto hlo_module_proto = xla_computation.proto(); TF_ASSIGN_OR_RETURN(ProgramShape shape, xla_computation.GetProgramShape()); DebugOptions debug_options = GetDebugOptionsFromFlags(); HloModuleConfig config(shape); config.set_debug_options(debug_options); return HloModule::CreateFromProto(hlo_module_proto, config); } static absl::StatusOr<std::unique_ptr<HloModuleAndMetadata>> ReadModuleFromSymbolRepo(absl::string_view symbol_repo, absl::string_view symbol_reference, BackendType 
backend) { std::unique_ptr<HloModuleAndMetadata> mod; TF_ASSIGN_OR_RETURN( mod, LookupSymbolInRepository(symbol_repo, symbol_reference, backend)); if (mod == nullptr) { return absl::NotFoundError( absl::StrCat("Could not find ", symbol_reference, " in ", symbol_repo)); } return mod; } static absl::StatusOr<bool> LoadAutotuneDataFromModule( HloModuleAndMetadata* mod, BackendType backend) { if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (auto* data = static_cast<gpu::GpuBackendSpecificData*>( mod->backend_specific_data.get()); data != nullptr && data->autotune_results.has_value()) { TF_RETURN_IF_ERROR( gpu::AutotunerUtil::LoadAutotuneResults(*data->autotune_results)); return true; } #endif } return false; } static std::unique_ptr<Compiler::TargetConfig> ReadTargetConfigFromModule( HloModuleAndMetadata* mod, BackendType backend) { if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (auto* data = static_cast<gpu::GpuBackendSpecificData*>( mod->backend_specific_data.get()); data != nullptr) { return std::move(mod->target_config); } #endif } return nullptr; } absl::Status XlaCompileMain(const XlaCompileOptions& options) { std::unique_ptr<HloModule> hlo_module; std::unique_ptr<Compiler::TargetConfig> target_config; if (options.platform != "cpu" && options.platform != "gpu") { return absl::UnimplementedError( absl::StrCat("platform", options.platform, " is not supported")); } const BackendType backend = (options.platform == "gpu" ? BackendType::kGpu : BackendType::kCpu); absl::string_view symbol_repo = options.repo_options.symbol_repo; if (absl::string_view symbol_id = options.repo_options.symbol_id; !symbol_id.empty()) { TF_ASSIGN_OR_RETURN( std::unique_ptr<HloModuleAndMetadata> mod, ReadModuleFromSymbolRepo(symbol_repo, symbol_id, backend)); hlo_module = std::move(mod->hlo_module); target_config = ReadTargetConfigFromModule(mod.get(), backend); } else { TF_ASSIGN_OR_RETURN(hlo_module, LoadModule(options.module_path)); } #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM bool found_autotune = false; #endif if (absl::string_view optimized_symbol_id = options.repo_options.optimized_symbol_id; !optimized_symbol_id.empty()) { TF_ASSIGN_OR_RETURN( std::unique_ptr<HloModuleAndMetadata> optimized_mod, ReadModuleFromSymbolRepo(symbol_repo, optimized_symbol_id, backend)); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TF_ASSIGN_OR_RETURN(found_autotune, LoadAutotuneDataFromModule( optimized_mod.get(), backend)); #endif } xla::TimerStats stats; xla::ScopedLoggingTimer timer("compilation", true, "xla_compile_main.cc", 1, &stats); CompilationResult compilation_result; absl::Cleanup cleanup([&] { timer.StopAndLog(); if (!options.result_output_file.empty()) { TF_QCHECK_OK(WriteResultFile(options.result_output_file, stats, compilation_result)); } }); std::optional<Compiler::TargetConfig> cfg = std::nullopt; if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (absl::string_view gpu_target_config_path = options.gpu_options.gpu_target_config_path; !gpu_target_config_path.empty()) { std::string gpu_target_config_string; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), std::string(gpu_target_config_path), &gpu_target_config_string)); stream_executor::GpuTargetConfigProto gpu_target_config_proto; if (!tsl::protobuf::TextFormat::ParseFromString( gpu_target_config_string, &gpu_target_config_proto)) { return FailedPrecondition("Failed to parse GpuTargetConfigProto"); } target_config = 
std::make_unique<Compiler::TargetConfig>(gpu_target_config_proto); if (absl::string_view autotune_results_path = options.gpu_options.autotune_results_path; !found_autotune && !autotune_results_path.empty()) { TF_RETURN_IF_ERROR(gpu::AutotunerUtil::LoadAutotuneResultsFromFile( autotune_results_path)); } } cfg = (options.gpu_options.use_attached_device) ? std::nullopt : std::make_optional(*std::move(target_config)); #endif } auto result = CompileExecutable(std::move(hlo_module), backend, std::move(cfg), compilation_result); *compilation_result.mutable_status() = tsl::StatusToProto(result.status()); if (!result.ok()) { return result.status(); } TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), options.output_path, *result)); if (options.repo_options.wait_for_uploads) { MaybeWaitForUploads(); } return absl::OkStatus(); } }
#include "xla/tools/xla_compile_lib.h" #include <memory> #include <optional> #include <string> #include <utility> #include "google/protobuf/duration.pb.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/platform_util.h" #include "xla/service/symbol_repository.h" #include "xla/service/xla_compile_result.pb.h" #include "xla/stream_executor/device_description.pb.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "xla/util.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/env.h" #include "tsl/platform/env_time.h" #include "tsl/platform/path.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" #include "tsl/protobuf/status.pb.h" namespace xla { namespace { using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::Not; using ::tsl::testing::IsOk; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; #if XLA_TEST_BACKEND_CPU static constexpr absl::string_view kPlatformName = "Host"; #elif XLA_TEST_BACKEND_GPU static constexpr absl::string_view kPlatformName = #if TENSORFLOW_USE_ROCM "ROCM"; #else "CUDA"; #endif #endif class XlaCompileLibTest : public HloTestBase { protected: XlaCompileLibTest() : HloTestBase(*PlatformUtil::GetPlatform(std::string(kPlatformName)), GetReferencePlatform()) {} void SetUp() override { const std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "data", "add.hlo"); std::string hlo; TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), hlo_path, &hlo)); TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo)); } std::unique_ptr<HloModule> module_; }; TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(CompilesForCpu)) { CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kCpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithDevice)) { CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); EXPECT_TRUE(result.has_hlo_module()) << result.DebugString(); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithoutDevice)) { const std::string target_config_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "xla_aot_compile_test_gpu_target_config.prototxt"); stream_executor::GpuTargetConfigProto target_config; TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), target_config_path, &target_config)); CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); EXPECT_TRUE(result.has_hlo_module()) << result.DebugString(); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(ErrorsOnUnexpectedPlatform)) { XlaCompileOptions options; options.platform = "tpu"; EXPECT_THAT(XlaCompileMain(options), StatusIs(tsl::error::UNIMPLEMENTED)); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFilePropagatesErrors)) { TimerStats stats; CompilationResult result; EXPECT_THAT(WriteResultFile("/does/not/exist", stats, result), Not(IsOk())); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFileWritesTheFile)) { std::string result_output_file; ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&result_output_file)); TimerStats stats; { absl::MutexLock 
ml(&stats.stats_mutex); stats.cumulative_secs = 5.5; stats.max_secs = 5.5; } CompilationResult result; google::protobuf::Duration duration; duration.set_seconds(5); duration.set_nanos(0.5 * tsl::EnvTime::kSecondsToNanos); *result.mutable_perf_stats()->mutable_compilation_duration() = duration; *result.mutable_perf_stats()->mutable_total_duration() = duration; TF_ASSERT_OK(WriteResultFile(result_output_file, stats, result)); CompilationResult got_result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_output_file, &got_result)); EXPECT_EQ(5, got_result.perf_stats().compilation_duration().seconds()); EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos, got_result.perf_stats().compilation_duration().nanos()); EXPECT_EQ(5, got_result.perf_stats().total_duration().seconds()); EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos, got_result.perf_stats().total_duration().nanos()); } TEST_F(XlaCompileLibTest, LoadModuleErrors) { EXPECT_THAT(LoadModule("/does/not/exist"), Not(IsOk())); } TEST_F(XlaCompileLibTest, LoadModuleLoadsTextFormat) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); EXPECT_THAT(LoadModule(module_file), IsOkAndHolds(Not(IsNull()))); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(MainForCpu)) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); const std::string output_path = tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_output"); const std::string result_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_result.pb"); XlaCompileOptions options; options.module_path = module_file; options.output_path = output_path; options.platform = "cpu"; options.result_output_file = result_file; TF_EXPECT_OK(XlaCompileMain(options)); CompilationResult result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result)); EXPECT_TRUE(result.has_status()); EXPECT_EQ(result.status().code(), tensorflow::error::OK); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(MainForGpu)) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); const std::string output_path = tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_output"); const std::string result_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_result.pb"); XlaCompileOptions options; options.module_path = module_file; options.output_path = output_path; options.platform = "gpu"; options.result_output_file = result_file; options.gpu_options.use_attached_device = true; TF_EXPECT_OK(XlaCompileMain(options)); CompilationResult result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result)); EXPECT_TRUE(result.has_status()); EXPECT_EQ(result.status().code(), tensorflow::error::OK); } } }
absl::StatusOr<std::unique_ptr<HloModule>> LoadModule( const absl::string_view module_path) { auto format = std::string(tsl::io::Extension(module_path)); if (format == "hlo" || format == "txt" || format == "pb") { return LoadModuleFromFile( std::string(module_path), format, hlo_module_loader_details::Config(), [&](HloModuleConfig* c) {}, nullptr); } std::string module_string; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), std::string(module_path), &module_string)); mlir::DialectRegistry dialects; dialects.insert<mlir::arith::ArithDialect>(); dialects.insert<mlir::mhlo::MhloDialect>(); dialects.insert<mlir::func::FuncDialect>(); mlir::stablehlo::registerAllDialects(dialects); auto threading = mlir::MLIRContext::Threading::DISABLED; auto ctx = std::make_unique<mlir::MLIRContext>(dialects, threading); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(module_string, ctx.get()); XlaComputation xla_computation; TF_RETURN_IF_ERROR( MlirToXlaComputation(*module, xla_computation, false, false)); HloModuleProto hlo_module_proto = xla_computation.proto(); TF_ASSIGN_OR_RETURN(ProgramShape shape, xla_computation.GetProgramShape()); DebugOptions debug_options = GetDebugOptionsFromFlags(); HloModuleConfig config(shape); config.set_debug_options(debug_options); return HloModule::CreateFromProto(hlo_module_proto, config); }
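// --- Illustrative sketch; not part of the original sources. ---
// LoadModule above dispatches on the file extension: "hlo", "txt", and "pb"
// are handled by LoadModuleFromFile, while any other extension is parsed as
// MLIR (MHLO/StableHLO) text and lowered through MlirToXlaComputation. The
// path is a hypothetical placeholder.
absl::StatusOr<std::unique_ptr<xla::HloModule>> loaded =
    xla::LoadModule("/tmp/add.mlir");
if (!loaded.ok()) {
  // A missing file or an unparsable module surfaces here as a non-OK status.
}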
TEST_F(XlaCompileLibTest, LoadModuleErrors) { EXPECT_THAT(LoadModule("/does/not/exist"), Not(IsOk())); } TEST_F(XlaCompileLibTest, LoadModuleLoadsTextFormat) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); EXPECT_THAT(LoadModule(module_file), IsOkAndHolds(Not(IsNull()))); }
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h" #include <stddef.h> #include <stdint.h> #include <cassert> #include <memory> #include <optional> #include <string> #include <string_view> #include <utility> #include <vector> #include "absl/base/optimization.h" #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include <nlohmann/json.hpp> #include "tensorstore/batch.h" #include "tensorstore/context.h" #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h" #include "tensorstore/index.h" #include "tensorstore/internal/cache/async_cache.h" #include "tensorstore/internal/cache/cache.h" #include "tensorstore/internal/cache/cache_pool_resource.h" #include "tensorstore/internal/cache/kvs_backed_cache.h" #include "tensorstore/internal/cache_key/cache_key.h" #include "tensorstore/internal/data_copy_concurrency_resource.h" #include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/internal/json_binding/bindable.h" #include "tensorstore/internal/json_binding/dimension_indexed.h" #include "tensorstore/internal/json_binding/json_binding.h" #include "tensorstore/internal/mutex.h" #include "tensorstore/json_serialization_options_base.h" #include "tensorstore/kvstore/batch_util.h" #include "tensorstore/kvstore/byte_range.h" #include "tensorstore/kvstore/driver.h" #include "tensorstore/kvstore/generation.h" #include "tensorstore/kvstore/key_range.h" #include "tensorstore/kvstore/kvstore.h" #include "tensorstore/kvstore/operations.h" #include "tensorstore/kvstore/read_modify_write.h" #include "tensorstore/kvstore/read_result.h" #include "tensorstore/kvstore/registry.h" #include "tensorstore/kvstore/spec.h" #include "tensorstore/kvstore/supported_features.h" #include "tensorstore/kvstore/transaction.h" #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h" #include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h" #include "tensorstore/transaction.h" #include "tensorstore/util/bit_vec.h" #include "tensorstore/util/execution/any_receiver.h" #include "tensorstore/util/execution/execution.h" #include "tensorstore/util/execution/flow_sender_operation_state.h" #include "tensorstore/util/executor.h" #include "tensorstore/util/future.h" #include "tensorstore/util/garbage_collection/fwd.h" #include "tensorstore/util/garbage_collection/garbage_collection.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" #include "tensorstore/internal/cache_key/std_vector.h" #include "tensorstore/internal/estimate_heap_usage/std_optional.h" #include "tensorstore/internal/estimate_heap_usage/std_vector.h" #include "tensorstore/serialization/std_vector.h" #include "tensorstore/util/execution/result_sender.h" #include "tensorstore/util/garbage_collection/std_vector.h" namespace tensorstore { namespace zarr3_sharding_indexed { namespace { using ::tensorstore::internal_kvstore::DeleteRangeEntry; using ::tensorstore::internal_kvstore::kReadModifyWrite; using ::tensorstore::kvstore::ListEntry; using ::tensorstore::kvstore::ListReceiver; class ShardIndexKeyValueStore : public kvstore::Driver { public: explicit ShardIndexKeyValueStore(kvstore::DriverPtr base, ShardIndexLocation index_location, int64_t index_size_in_bytes) : base_(std::move(base)), 
index_location_(index_location), index_size_in_bytes_(index_size_in_bytes) {} Future<kvstore::ReadResult> Read(kvstore::Key key, kvstore::ReadOptions options) override { assert(options.byte_range == OptionalByteRangeRequest{}); switch (index_location_) { case ShardIndexLocation::kStart: options.byte_range = OptionalByteRangeRequest::Range(0, index_size_in_bytes_); break; case ShardIndexLocation::kEnd: options.byte_range = OptionalByteRangeRequest::SuffixLength(index_size_in_bytes_); break; } return MapFutureError( InlineExecutor{}, [](const absl::Status& status) { return internal::ConvertInvalidArgumentToFailedPrecondition(status); }, base_->Read(std::move(key), std::move(options))); } std::string DescribeKey(std::string_view key) override { return tensorstore::StrCat("shard index in ", base_->DescribeKey(key)); } void GarbageCollectionVisit( garbage_collection::GarbageCollectionVisitor& visitor) const final { } kvstore::Driver* base() { return base_.get(); } private: kvstore::DriverPtr base_; ShardIndexLocation index_location_; int64_t index_size_in_bytes_; }; class ShardIndexCache : public internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache> { using Base = internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache>; public: using ReadData = ShardIndex; class Entry : public Base::Entry { public: using OwningCache = ShardIndexCache; size_t ComputeReadDataSizeInBytes(const void* read_data) override { const auto& cache = GetOwningCache(*this); return read_data ? cache.shard_index_params().num_entries * sizeof(uint64_t) * 2 : 0; } std::string GetKeyValueStoreKey() override { return GetOwningCache(*this).base_kvstore_path_; } void DoDecode(std::optional<absl::Cord> value, DecodeReceiver receiver) override { GetOwningCache(*this).executor()( [this, value = std::move(value), receiver = std::move(receiver)]() mutable { std::shared_ptr<ReadData> read_data; if (value) { TENSORSTORE_ASSIGN_OR_RETURN( auto shard_index, DecodeShardIndex(*value, GetOwningCache(*this).shard_index_params()), static_cast<void>(execution::set_error(receiver, _))); read_data = std::make_shared<ReadData>(std::move(shard_index)); } execution::set_value(receiver, std::move(read_data)); }); } }; Entry* DoAllocateEntry() final { return new Entry; } size_t DoGetSizeofEntry() final { return sizeof(Entry); } TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final { ABSL_UNREACHABLE(); } explicit ShardIndexCache(kvstore::DriverPtr base_kvstore, std::string base_kvstore_path, Executor executor, ShardIndexParameters&& params) : Base(kvstore::DriverPtr(new ShardIndexKeyValueStore( std::move(base_kvstore), params.index_location, params.index_codec_state->encoded_size()))), base_kvstore_path_(std::move(base_kvstore_path)), executor_(std::move(executor)), shard_index_params_(std::move(params)) {} ShardIndexKeyValueStore* shard_index_kvstore_driver() { return static_cast<ShardIndexKeyValueStore*>(this->Base::kvstore_driver()); } kvstore::Driver* base_kvstore_driver() { return shard_index_kvstore_driver()->base(); } const std::string& base_kvstore_path() const { return base_kvstore_path_; } const Executor& executor() { return executor_; } span<const Index> grid_shape() const { return span<const Index>(shard_index_params_.index_shape.data(), shard_index_params_.index_shape.size() - 1); } const ShardIndexParameters& shard_index_params() const { return shard_index_params_; } std::string base_kvstore_path_; Executor executor_; ShardIndexParameters shard_index_params_; }; class ShardedKeyValueStoreWriteCache : 
public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache, internal::AsyncCache> { using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache, internal::AsyncCache>; public: using ReadData = ShardEntries; explicit ShardedKeyValueStoreWriteCache( internal::CachePtr<ShardIndexCache> shard_index_cache) : Base(kvstore::DriverPtr(shard_index_cache->base_kvstore_driver())), shard_index_cache_(std::move(shard_index_cache)) {} class Entry : public Base::Entry { public: using OwningCache = ShardedKeyValueStoreWriteCache; size_t ComputeReadDataSizeInBytes(const void* data) override { return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data)); } void DoDecode(std::optional<absl::Cord> value, DecodeReceiver receiver) override { GetOwningCache(*this).executor()( [this, value = std::move(value), receiver = std::move(receiver)]() mutable { ShardEntries entries; const auto& shard_index_params = GetOwningCache(*this).shard_index_params(); if (value) { TENSORSTORE_ASSIGN_OR_RETURN( entries, DecodeShard(*value, shard_index_params), static_cast<void>(execution::set_error(receiver, _))); } else { entries.entries.resize(shard_index_params.num_entries); } execution::set_value( receiver, std::make_shared<ShardEntries>(std::move(entries))); }); } void DoEncode(std::shared_ptr<const ShardEntries> data, EncodeReceiver receiver) override { TENSORSTORE_ASSIGN_OR_RETURN( auto encoded_shard, EncodeShard(*data, GetOwningCache(*this).shard_index_params()), static_cast<void>(execution::set_error(receiver, _))); execution::set_value(receiver, std::move(encoded_shard)); } std::string GetKeyValueStoreKey() override { return GetOwningCache(*this).base_kvstore_path(); } }; class TransactionNode : public Base::TransactionNode, public internal_kvstore::AtomicMultiPhaseMutation { public: using OwningCache = ShardedKeyValueStoreWriteCache; using Base::TransactionNode::TransactionNode; absl::Mutex& mutex() override { return this->mutex_; } void PhaseCommitDone(size_t next_phase) override {} internal::TransactionState::Node& GetTransactionNode() override { return *this; } void Abort() override { this->AbortRemainingPhases(); Base::TransactionNode::Abort(); } std::string DescribeKey(std::string_view key) override { auto& cache = GetOwningCache(*this); return tensorstore::StrCat( DescribeInternalKey(key, cache.shard_index_params().grid_shape()), " in ", cache.kvstore_driver()->DescribeKey(cache.base_kvstore_path())); } void DoApply(ApplyOptions options, ApplyReceiver receiver) override; void StartApply(); void AllEntriesDone( internal_kvstore::SinglePhaseMutation& single_phase_mutation) override; void MergeForWriteback(bool conditional); void RecordEntryWritebackError( internal_kvstore::ReadModifyWriteEntry& entry, absl::Status error) override { absl::MutexLock lock(&mutex_); if (apply_status_.ok()) { apply_status_ = std::move(error); } } void Revoke() override { Base::TransactionNode::Revoke(); { UniqueWriterLock(*this); } this->RevokeAllEntries(); } void WritebackSuccess(ReadState&& read_state) override; void WritebackError() override; void InvalidateReadState() override; bool MultiPhaseReadsCommitted() override { return this->reads_committed_; } void Read( internal_kvstore::ReadModifyWriteEntry& entry, kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options, kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override { this->AsyncCache::TransactionNode::Read({options.staleness_bound}) .ExecuteWhenReady(WithExecutor( GetOwningCache(*this).executor(), [&entry, if_not_equal = 
std::move(options.generation_conditions.if_not_equal), receiver = std::move(receiver)]( ReadyFuture<const void> future) mutable { if (!future.result().ok()) { execution::set_error(receiver, future.result().status()); return; } execution::submit(HandleShardReadSuccess(entry, if_not_equal), receiver); })); } static Result<kvstore::ReadResult> HandleShardReadSuccess( internal_kvstore::ReadModifyWriteEntry& entry, const StorageGeneration& if_not_equal) { auto& self = static_cast<TransactionNode&>(entry.multi_phase()); TimestampedStorageGeneration stamp; std::shared_ptr<const ShardEntries> entries; { AsyncCache::ReadLock<ShardEntries> lock{self}; stamp = lock.stamp(); entries = lock.shared_data(); } if (!StorageGeneration::IsUnknown(stamp.generation) && stamp.generation == if_not_equal) { return kvstore::ReadResult::Unspecified(std::move(stamp)); } if (StorageGeneration::IsDirty(stamp.generation)) { stamp.generation = StorageGeneration::AddLayer(std::move(stamp.generation)); } auto entry_id = InternalKeyToEntryId(entry.key_); const auto& shard_entry = entries->entries[entry_id]; if (!shard_entry) { return kvstore::ReadResult::Missing(std::move(stamp)); } else { return kvstore::ReadResult::Value(*shard_entry, std::move(stamp)); } } ApplyReceiver apply_receiver_; ApplyOptions apply_options_; absl::Status apply_status_; }; Entry* DoAllocateEntry() final { return new Entry; } size_t DoGetSizeofEntry() final { return sizeof(Entry); } TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final { return new TransactionNode(static_cast<Entry&>(entry)); } const internal::CachePtr<ShardIndexCache>& shard_index_cache() const { return shard_index_cache_; } const Executor& executor() { return shard_index_cache()->executor(); } const ShardIndexParameters& shard_index_params() const { return shard_index_cache_->shard_index_params(); } int64_t num_entries_per_shard() const { return shard_index_cache_->shard_index_params().num_entries; } const std::string& base_kvstore_path() const { return shard_index_cache_->base_kvstore_path(); } internal::CachePtr<ShardIndexCache> shard_index_cache_; }; void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() { Base::TransactionNode::InvalidateReadState(); internal_kvstore::InvalidateReadState(phases_); } void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply( ApplyOptions options, ApplyReceiver receiver) { apply_receiver_ = std::move(receiver); apply_options_ = options; apply_status_ = absl::OkStatus(); GetOwningCache(*this).executor()([this] { this->StartApply(); }); } void ShardedKeyValueStoreWriteCache::TransactionNode::StartApply() { RetryAtomicWriteback(apply_options_.staleness_bound); } void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone( internal_kvstore::SinglePhaseMutation& single_phase_mutation) { if (!apply_status_.ok()) { execution::set_error(std::exchange(apply_receiver_, {}), std::exchange(apply_status_, {})); return; } auto& self = *this; GetOwningCache(*this).executor()([&self] { TimestampedStorageGeneration stamp; bool mismatch = false; bool modified = false; int64_t num_entries = 0; auto& cache = GetOwningCache(self); const int64_t num_entries_per_shard = cache.num_entries_per_shard(); for (auto& entry : self.phases_.entries_) { if (entry.entry_type() != kReadModifyWrite) { auto& dr_entry = static_cast<DeleteRangeEntry&>(entry); auto [begin_id, end_id] = InternalKeyRangeToEntryRange( dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard); modified = true; num_entries += end_id - begin_id; 
continue; } auto& buffered_entry = static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>( entry); if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) { modified = true; ++num_entries; } auto& entry_stamp = buffered_entry.stamp(); if (StorageGeneration::IsConditional(entry_stamp.generation)) { if (!StorageGeneration::IsUnknown(stamp.generation) && StorageGeneration::Clean(stamp.generation) != StorageGeneration::Clean(entry_stamp.generation)) { mismatch = true; break; } else { stamp = entry_stamp; } } } if (mismatch) { self.apply_options_.staleness_bound = absl::Now(); self.StartApply(); return; } if (!modified && StorageGeneration::IsUnknown(stamp.generation) && self.apply_options_.apply_mode != ApplyOptions::ApplyMode::kSpecifyUnchanged) { internal::AsyncCache::ReadState update; update.stamp = TimestampedStorageGeneration::Unconditional(); execution::set_value(std::exchange(self.apply_receiver_, {}), std::move(update)); return; } if (!StorageGeneration::IsUnknown(stamp.generation) || num_entries != num_entries_per_shard) { self.internal::AsyncCache::TransactionNode::Read( {self.apply_options_.staleness_bound}) .ExecuteWhenReady([&self](ReadyFuture<const void> future) { if (!future.result().ok()) { execution::set_error(std::exchange(self.apply_receiver_, {}), future.result().status()); return; } GetOwningCache(self).executor()( [&self] { self.MergeForWriteback(true); }); }); return; } self.MergeForWriteback(false); }); } void ShardedKeyValueStoreWriteCache::TransactionNode::MergeForWriteback( bool conditional) { TimestampedStorageGeneration stamp; ShardEntries new_entries; if (conditional) { auto lock = internal::AsyncCache::ReadLock<ShardEntries>{*this}; stamp = lock.stamp(); new_entries = *lock.shared_data(); } else { stamp = TimestampedStorageGeneration::Unconditional(); } auto& cache = GetOwningCache(*this); const int64_t num_entries_per_shard = cache.num_entries_per_shard(); const bool has_existing_entries = !new_entries.entries.empty(); new_entries.entries.resize(num_entries_per_shard); bool mismatch = false; bool changed = false; for (auto& entry : phases_.entries_) { if (entry.entry_type() != kReadModifyWrite) { auto& dr_entry = static_cast<DeleteRangeEntry&>(entry); auto [begin_id, end_id] = InternalKeyRangeToEntryRange( dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard); if (has_existing_entries) { for (EntryId id = begin_id; id < end_id; ++id) { new_entries.entries[id] = std::nullopt; } } changed = true; continue; } auto& buffered_entry = static_cast<internal_kvstore::AtomicMultiPhaseMutation:: BufferedReadModifyWriteEntry&>(entry); auto& entry_stamp = buffered_entry.stamp(); if (StorageGeneration::IsConditional(entry_stamp.generation) && StorageGeneration::Clean(entry_stamp.generation) != StorageGeneration::Clean(stamp.generation)) { mismatch = true; break; } if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified || !StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) { continue; } auto entry_id = InternalKeyToEntryId(buffered_entry.key_); auto& new_entry = new_entries.entries[entry_id]; if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) { new_entry = buffered_entry.value_; changed = true; } else if (new_entry) { new_entry = std::nullopt; changed = true; } else if (!conditional) { changed = true; } } if (mismatch) { apply_options_.staleness_bound = absl::Now(); this->StartApply(); return; } internal::AsyncCache::ReadState update; update.stamp = std::move(stamp); if (changed) { 
update.stamp.generation.MarkDirty(); } update.data = std::make_shared<ShardEntries>(std::move(new_entries)); execution::set_value(std::exchange(apply_receiver_, {}), std::move(update)); } void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess( ReadState&& read_state) { for (auto& entry : phases_.entries_) { if (entry.entry_type() != kReadModifyWrite) { internal_kvstore::WritebackSuccess(static_cast<DeleteRangeEntry&>(entry)); } else { internal_kvstore::WritebackSuccess( static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry), read_state.stamp); } } internal_kvstore::DestroyPhaseEntries(phases_); Base::TransactionNode::WritebackSuccess(std::move(read_state)); } void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() { internal_kvstore::WritebackError(phases_); internal_kvstore::DestroyPhaseEntries(phases_); Base::TransactionNode::WritebackError(); } struct ShardedKeyValueStoreSpecData { Context::Resource<internal::CachePoolResource> cache_pool; Context::Resource<internal::DataCopyConcurrencyResource> data_copy_concurrency; kvstore::Spec base; std::vector<Index> grid_shape; internal_zarr3::ZarrCodecChainSpec index_codecs; ShardIndexLocation index_location; TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData, internal_json_binding::NoOptions, IncludeDefaults, ::nlohmann::json::object_t) constexpr static auto ApplyMembers = [](auto&& x, auto f) { return f(x.cache_pool, x.data_copy_concurrency, x.base, x.grid_shape, x.index_codecs, x.index_location); }; }; namespace jb = ::tensorstore::internal_json_binding; TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER( ShardedKeyValueStoreSpecData, jb::Object( jb::Member("base", jb::Projection<&ShardedKeyValueStoreSpecData::base>()), jb::Member( "grid_shape", jb::Projection<&ShardedKeyValueStoreSpecData::grid_shape>( jb::Validate([](const auto& options, auto* obj) { return ValidateGridShape(*obj); }, jb::ChunkShapeVector(nullptr)))), jb::Member("index_codecs", jb::Projection<&ShardedKeyValueStoreSpecData::index_codecs>( internal_zarr3::ZarrCodecChainJsonBinder< false>)), jb::Member( "index_location", jb::Projection<&ShardedKeyValueStoreSpecData::index_location>( jb::DefaultValue<jb::kAlwaysIncludeDefaults>([](auto* x) { *x = ShardIndexLocation::kEnd; }))), jb::Member(internal::CachePoolResource::id, jb::Projection<&ShardedKeyValueStoreSpecData::cache_pool>()), jb::Member( internal::DataCopyConcurrencyResource::id, jb::Projection< &ShardedKeyValueStoreSpecData::data_copy_concurrency>()))); class ShardedKeyValueStoreSpec : public internal_kvstore::RegisteredDriverSpec< ShardedKeyValueStoreSpec, ShardedKeyValueStoreSpecData> { public: static constexpr char id[] = "zarr3_sharding_indexed"; Future<kvstore::DriverPtr> DoOpen() const override; Result<kvstore::Spec> GetBase(std::string_view path) const override { return data_.base; } }; class ShardedKeyValueStore : public internal_kvstore::RegisteredDriver<ShardedKeyValueStore, ShardedKeyValueStoreSpec> { public: explicit ShardedKeyValueStore(ShardedKeyValueStoreParameters&& params, std::string_view shared_cache_key = {}); Future<ReadResult> Read(Key key, ReadOptions options) override; void ListImpl(ListOptions options, ListReceiver receiver) override; Future<TimestampedStorageGeneration> Write(Key key, std::optional<Value> value, WriteOptions options) override; absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction, size_t& phase, Key key, ReadModifyWriteSource& source) override; absl::Status TransactionalDeleteRange( const internal::OpenTransactionPtr& 
transaction, KeyRange range) override; Future<const void> DeleteRange(KeyRange range) override; std::string DescribeKey(std::string_view key) override; kvstore::SupportedFeatures GetSupportedFeatures( const KeyRange& key_range) const final; Result<KvStore> GetBase(std::string_view path, const Transaction& transaction) const override; kvstore::Driver* base_kvstore_driver() const { return shard_index_cache()->base_kvstore_driver(); } const ShardIndexParameters& shard_index_params() const { return shard_index_cache()->shard_index_params(); }
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h" #include <stddef.h> #include <stdint.h> #include <functional> #include <initializer_list> #include <map> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/container/flat_hash_map.h" #include "absl/log/absl_check.h" #include "absl/random/random.h" #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "re2/re2.h" #include "riegeli/bytes/cord_writer.h" #include "riegeli/bytes/write.h" #include "riegeli/digests/crc32c_digester.h" #include "tensorstore/batch.h" #include "tensorstore/context.h" #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h" #include "tensorstore/index.h" #include "tensorstore/internal/cache/cache.h" #include "tensorstore/internal/cache/kvs_backed_cache_testutil.h" #include "tensorstore/internal/global_initializer.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/internal/riegeli/digest_suffixed_writer.h" #include "tensorstore/internal/testing/scoped_directory.h" #include "tensorstore/internal/thread/thread_pool.h" #include "tensorstore/kvstore/byte_range.h" #include "tensorstore/kvstore/generation.h" #include "tensorstore/kvstore/kvstore.h" #include "tensorstore/kvstore/memory/memory_key_value_store.h" #include "tensorstore/kvstore/mock_kvstore.h" #include "tensorstore/kvstore/operations.h" #include "tensorstore/kvstore/read_result.h" #include "tensorstore/kvstore/spec.h" #include "tensorstore/kvstore/test_matchers.h" #include "tensorstore/kvstore/test_util.h" #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h" #include "tensorstore/transaction.h" #include "tensorstore/util/executor.h" #include "tensorstore/util/extents.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" #include "tensorstore/util/str_cat.h" namespace { namespace kvstore = ::tensorstore::kvstore; using ::tensorstore::Batch; using ::tensorstore::Executor; using ::tensorstore::Future; using ::tensorstore::Index; using ::tensorstore::KvStore; using ::tensorstore::MatchesStatus; using ::tensorstore::OptionalByteRangeRequest; using ::tensorstore::Result; using ::tensorstore::span; using ::tensorstore::StorageGeneration; using ::tensorstore::TimestampedStorageGeneration; using ::tensorstore::Transaction; using ::tensorstore::internal::CachePool; using ::tensorstore::internal::GetCache; using ::tensorstore::internal::KvsBackedTestCache; using ::tensorstore::internal::MatchesKvsReadResult; using ::tensorstore::internal::MatchesKvsReadResultNotFound; using ::tensorstore::internal::MatchesTimestampedStorageGeneration; using ::tensorstore::internal::MockKeyValueStore; using ::tensorstore::internal::UniqueNow; using ::tensorstore::internal_zarr3::ZarrCodecChainSpec; using ::tensorstore::kvstore::ReadResult; using ::tensorstore::zarr3_sharding_indexed::EntryId; using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey; using ::tensorstore::zarr3_sharding_indexed::GetShardedKeyValueStore; using ::tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreParameters; using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation; constexpr CachePool::Limits kSmallCacheLimits{10000000}; absl::Cord Bytes(std::initializer_list<unsigned char> x) { return absl::Cord(std::string(x.begin(), x.end())); } absl::Cord 
WithCrc32c(absl::Cord input) { absl::Cord output; riegeli::CordWriter writer{&output}; TENSORSTORE_CHECK_OK(riegeli::Write( input, tensorstore::internal::DigestSuffixedWriter< riegeli::Crc32cDigester, tensorstore::internal::LittleEndianDigestWriter>{&writer})); ABSL_CHECK(writer.Close()); return output; } class GetKey { public: GetKey(bool sequential, std::vector<Index> grid_shape) : sequential_(sequential), grid_shape_(std::move(grid_shape)), num_entries_( tensorstore::ProductOfExtents(span<const Index>(grid_shape_))) {} std::string operator()(std::string key) const { auto it = key_to_entry_id_.find(key); if (it == key_to_entry_id_.end()) { ABSL_CHECK_LT(entry_id_to_key_.size(), num_entries_); while (true) { auto x = sequential_ ? next_entry_id_++ : absl::Uniform<EntryId>(gen_); x = x % num_entries_; if (entry_id_to_key_.emplace(x, key).second) { it = key_to_entry_id_.emplace(key, x).first; break; } } } return EntryIdToKey(it->second, grid_shape_); } private: bool sequential_; std::vector<Index> grid_shape_; EntryId num_entries_; mutable EntryId next_entry_id_ = 0; mutable absl::BitGen gen_; mutable absl::flat_hash_map<std::string, EntryId> key_to_entry_id_; mutable absl::flat_hash_map<EntryId, std::string> entry_id_to_key_; }; kvstore::DriverPtr GetDefaultStore(kvstore::DriverPtr base_kvstore, std::string base_kvstore_path, Executor executor, CachePool::StrongPtr cache_pool, const std::vector<Index>& grid_shape) { ShardedKeyValueStoreParameters params; params.base_kvstore = base_kvstore; params.base_kvstore_path = base_kvstore_path; params.executor = executor; params.cache_pool = CachePool::WeakPtr(cache_pool); TENSORSTORE_CHECK_OK_AND_ASSIGN( auto index_codecs, ZarrCodecChainSpec::FromJson( {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}, {{"name", "crc32c"}}})); params.index_params.index_location = ShardIndexLocation::kEnd; TENSORSTORE_CHECK_OK( params.index_params.Initialize(index_codecs, grid_shape)); return GetShardedKeyValueStore(std::move(params)); } TEST(ShardedKeyValueStoreTest, BasicFunctionality) { std::vector<std::pair<std::string, tensorstore::Executor>> executors{ {"inline", tensorstore::InlineExecutor{}}, {"thread_pool", tensorstore::internal::DetachedThreadPool(2)}}; for (const auto& [executor_name, executor] : executors) { for (const auto sequential_ids : {true, false}) { auto cache_pool = CachePool::Make(kSmallCacheLimits); auto base_kv_store = tensorstore::GetMemoryKeyValueStore(); const int64_t num_entries = 100; SCOPED_TRACE(executor_name); auto store = GetDefaultStore(base_kv_store, "shard_path", executor, cache_pool, {num_entries}); GetKey get_key_fn(sequential_ids, {num_entries}); tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn); } } } TEST(ShardedKeyValueStoreTest, DescribeKey) { CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits); kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore(); int64_t num_entries = 100; std::vector<Index> grid_shape{num_entries}; kvstore::DriverPtr store = GetDefaultStore(base_kv_store, "shard_path", tensorstore::InlineExecutor{}, cache_pool, grid_shape); for (const auto& [key, description] : std::vector<std::pair<uint32_t, std::string>>{ {0, "shard entry {0}/{100} in \"shard_path\""}, {1, "shard entry {1}/{100} in \"shard_path\""}, }) { EXPECT_EQ(description, store->DescribeKey(EntryIdToKey(key, grid_shape))); } } class RawEncodingTest : public ::testing::Test { protected: CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits); kvstore::DriverPtr 
base_kv_store = tensorstore::GetMemoryKeyValueStore(); kvstore::DriverPtr GetStore(const std::vector<Index>& grid_shape) { return GetDefaultStore(base_kv_store, "shard_path", tensorstore::InlineExecutor{}, cache_pool, grid_shape); } }; TEST_F(RawEncodingTest, MultipleUnconditionalWrites) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"), absl::Cord("efgh")}; std::vector<Future<TimestampedStorageGeneration>> futures; auto key = EntryIdToKey(10, grid_shape); tensorstore::Transaction txn(tensorstore::isolated); for (auto value : values) { futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value)); } txn.CommitAsync().IgnoreFuture(); std::vector<Result<TimestampedStorageGeneration>> results; for (const auto& future : futures) { results.push_back(future.result()); } TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read, base_kv_store->Read("shard_path").result()); EXPECT_THAT( results, ::testing::UnorderedElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration(shard_read.stamp.generation))); for (size_t i = 0; i < results.size(); ++i) { if (results[i] && results[i]->generation == shard_read.stamp.generation) { EXPECT_THAT(store->Read(key).result(), MatchesKvsReadResult(values[i], results[i]->generation)); } } } TEST_F(RawEncodingTest, List) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); std::map<std::string, absl::Cord> values{ {EntryIdToKey(1, grid_shape), absl::Cord("a")}, {EntryIdToKey(2, grid_shape), absl::Cord("bc")}, {EntryIdToKey(3, grid_shape), absl::Cord("def")}, {EntryIdToKey(10, grid_shape), absl::Cord("xyz")}}; for (auto [key, value] : values) { TENSORSTORE_EXPECT_OK(store->Write(key, value)); } EXPECT_THAT(tensorstore::internal::GetMap(store), ::testing::Optional(::testing::ElementsAreArray(values))); } TEST_F(RawEncodingTest, WritesAndDeletes) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); StorageGeneration gen1, gen2, gen3; { tensorstore::Transaction txn(tensorstore::isolated); auto init_future1 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a")); auto init_future2 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("bc")); auto init_future3 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(3, grid_shape), absl::Cord("def")); txn.CommitAsync().IgnoreFuture(); gen1 = init_future1.value().generation; gen2 = init_future2.value().generation; gen3 = init_future3.value().generation; } tensorstore::Transaction txn(tensorstore::isolated); auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}); auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("ww"), {gen2}); auto future3 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("xx"), {gen2}); auto future4 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(4, grid_shape), absl::Cord("zz"), {StorageGeneration::NoValue()}); auto future5 = kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(3, grid_shape), {gen3}); txn.CommitAsync().IgnoreFuture(); EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration( StorageGeneration::Unknown())); 
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read, base_kv_store->Read("shard_path").result()); EXPECT_THAT( std::vector({future2.result(), future3.result()}), ::testing::UnorderedElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()), MatchesTimestampedStorageGeneration(shard_read.stamp.generation))); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("a"))); EXPECT_THAT(store->Read(EntryIdToKey(2, grid_shape)).result(), MatchesKvsReadResult( !StorageGeneration::IsUnknown(future2.result()->generation) ? absl::Cord("ww") : absl::Cord("xx"))); EXPECT_THAT(store->Read(EntryIdToKey(3, grid_shape)).result(), MatchesKvsReadResultNotFound()); EXPECT_THAT(store->Read(EntryIdToKey(4, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("zz"))); } std::vector<std::vector<Result<TimestampedStorageGeneration>>> TestOrderDependentWrites( std::function<void()> init, std::function<Future<TimestampedStorageGeneration>()> op0, std::function<Future<TimestampedStorageGeneration>()> op1, std::function<void()> finalize) { std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results; for (int i = 0; i < 2; ++i) { std::vector<Future<TimestampedStorageGeneration>> futures(2); init(); if (i == 0) { futures[0] = op0(); futures[1] = op1(); } else { futures[1] = op1(); futures[0] = op0(); } finalize(); all_results.push_back({futures[0].result(), futures[1].result()}); } return all_results; } TEST_F(RawEncodingTest, WriteThenDelete) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); TENSORSTORE_ASSERT_OK( store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")).result()); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("a"))); TENSORSTORE_ASSERT_OK(store->Delete(EntryIdToKey(1, grid_shape)).result()); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResultNotFound()); } TEST_F(RawEncodingTest, MultipleDeleteExisting) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); StorageGeneration gen; tensorstore::Transaction txn{tensorstore::no_transaction}; EXPECT_THAT( TestOrderDependentWrites( [&] { gen = store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")) .value() .generation; txn = tensorstore::Transaction(tensorstore::isolated); }, [&] { return kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {gen}); }, [&] { return kvstore::DeleteCommitted( KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}); }, [&] { txn.CommitAsync().IgnoreFuture(); }), ::testing::UnorderedElementsAre( ::testing::ElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration( StorageGeneration::NoValue())), ::testing::ElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()), MatchesTimestampedStorageGeneration( StorageGeneration::Unknown())))); } TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); tensorstore::Transaction txn{tensorstore::no_transaction}; EXPECT_THAT( TestOrderDependentWrites( [&] { store->Delete(EntryIdToKey(0, grid_shape)).value(); txn = tensorstore::Transaction(tensorstore::isolated); }, [&] { return kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(0, grid_shape), absl::Cord("a")); }, [&] { return kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(0, 
grid_shape), absl::Cord("b"), {StorageGeneration::FromString("g")}); }, [&] { txn.CommitAsync().IgnoreFuture(); }), ::testing::Each(::testing::ElementsAre( MatchesTimestampedStorageGeneration( ::testing::AllOf(::testing::Not(StorageGeneration::NoValue()), ::testing::Not(StorageGeneration::Invalid()))), MatchesTimestampedStorageGeneration(StorageGeneration::Unknown())))); } TEST_F(RawEncodingTest, MultipleDeleteNonExisting) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); tensorstore::Transaction txn(tensorstore::isolated); std::vector futures{ kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}), kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()})}; txn.CommitAsync().IgnoreFuture(); std::vector results{futures[0].result(), futures[1].result()}; EXPECT_THAT( results, ::testing::UnorderedElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()))); } TEST_F(RawEncodingTest, ShardIndexTooShort) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); base_kv_store->Write("shard_path", Bytes({1, 2, 3})).value(); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesStatus( absl::StatusCode::kFailedPrecondition, RE2::QuoteMeta("Error reading shard index in \"shard_path\": " "Requested byte range [-1604, ?) is not valid " "for value of size 3"))); EXPECT_THAT( store->Write(EntryIdToKey(10, grid_shape), absl::Cord("abc")).result(), MatchesStatus(absl::StatusCode::kDataLoss, "Error reading \"shard_path\": " "Existing shard has size of 3 bytes, but expected at least " "1604 bytes")); } TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) { std::vector<Index> grid_shape{2}; kvstore::DriverPtr store = GetStore(grid_shape); auto content = WithCrc32c(Bytes({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, })); TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content)); EXPECT_THAT( store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesStatus(absl::StatusCode::kDataLoss, "Error reading shard index in \"shard_path\": " "Invalid shard index entry 1 with offset=.*, length=.*")); } TEST_F(RawEncodingTest, ShardIndexEntryByteRangeOutOfRange) { std::vector<Index> grid_shape{2}; kvstore::DriverPtr store = GetStore(grid_shape); auto content = WithCrc32c(Bytes({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0, 0, 0, 0, 0, 0, })); TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content)); EXPECT_THAT( store->Write(EntryIdToKey(1, grid_shape), absl::Cord("x")).result(), MatchesStatus(absl::StatusCode::kDataLoss, "Error reading \"shard_path\": " "Shard index entry 1 with byte range .* is invalid " "for shard of size .*")); } TEST_F(RawEncodingTest, ShardIndexInvalidChecksum) { std::vector<Index> grid_shape{2}; kvstore::DriverPtr store = GetStore(grid_shape); auto content = Bytes({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, }); content.Append("abcd"); TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content)); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), 
MatchesStatus(absl::StatusCode::kDataLoss, "Error reading shard index in \"shard_path\": " "Digest mismatch.*")); } class UnderlyingKeyValueStoreTest : public ::testing::Test { protected: CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits); MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make(); kvstore::DriverPtr GetStore(std::vector<Index> grid_shape) { return GetDefaultStore(mock_store, "shard_path", tensorstore::InlineExecutor{}, cache_pool, grid_shape); } std::vector<Index> grid_shape{5}; kvstore::DriverPtr store = GetStore(grid_shape); }; TEST_F(UnderlyingKeyValueStoreTest, Read) { absl::Time init_time = UniqueNow(); absl::Time shard_index_time; { auto future = store->Read(EntryIdToKey(2, grid_shape), {}); { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_not_equal); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4), req.options.byte_range); EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time)); shard_index_time = absl::Now(); req.promise.SetResult( ReadResult{ReadResult::kValue, WithCrc32c(Bytes({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 10, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, })), {StorageGeneration::FromString("g0"), shard_index_time}}); } ASSERT_FALSE(future.ready()) << future.status(); absl::Time read_time; { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_not_equal); EXPECT_EQ(StorageGeneration::FromString("g0"), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range); read_time = absl::Now(); req.promise.SetResult( ReadResult{ReadResult::kValue, Bytes({5, 6, 7, 8, 9}), {StorageGeneration::FromString("g0"), read_time}}); } ASSERT_EQ(0, mock_store->read_requests.size()); ASSERT_TRUE(future.ready()); EXPECT_THAT( future.result(), MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}), StorageGeneration::FromString("g0"), read_time)); } { kvstore::ReadOptions options; options.staleness_bound = init_time; auto future = store->Read(EntryIdToKey(3, grid_shape), options); ASSERT_TRUE(future.ready()); EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(shard_index_time)); } { auto req_time = UniqueNow(); auto future = store->Read(EntryIdToKey(3, grid_shape), {}); { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::FromString("g0"), req.options.generation_conditions.if_not_equal); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4), req.options.byte_range); EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time)); shard_index_time = absl::Now(); req.promise.SetResult(ReadResult::Unspecified( 
{StorageGeneration::FromString("g0"), shard_index_time})); } ASSERT_TRUE(future.ready()); EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(shard_index_time)); } { kvstore::ReadOptions options; options.staleness_bound = init_time; auto future = store->Read(EntryIdToKey(2, grid_shape), options); absl::Time read_time; { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_not_equal); EXPECT_EQ(StorageGeneration::FromString("g0"), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range); EXPECT_EQ(init_time, req.options.staleness_bound); read_time = absl::Now(); req.promise.SetResult( ReadResult{ReadResult::kValue, Bytes({5, 6, 7, 8, 9}), {StorageGeneration::FromString("g0"), read_time}}); } ASSERT_EQ(0, mock_store->read_requests.size()); ASSERT_TRUE(future.ready()); EXPECT_THAT( future.result(), MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}), StorageGeneration::FromString("g0"), read_time)); } { kvstore::ReadOptions options; options.staleness_bound = init_time; auto future = store->Read(EntryIdToKey(2, grid_shape), options); absl::Time abort_time; { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_not_equal); EXPECT_EQ(init_time, req.options.staleness_bound); EXPECT_EQ(StorageGeneration::FromString("g0"), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range); abort_time = absl::Now(); req.promise.SetResult(ReadResult::Unspecified( {StorageGeneration::FromString("g0"), abort_time})); } { auto req = mock_store->read_requests.pop_nonblock().value(); ASSERT_EQ(0, mock_store->read_requests.size()); EXPECT_EQ("shard_path", req.key); EXPECT_EQ(StorageGeneration::FromString("g0"), req.options.generation_conditions.if_not_equal); EXPECT_EQ(StorageGeneration::Unknown(), req.options.generation_conditions.if_equal); EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4), req.options.byte_range); EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time)); shard_index_time = absl::Now(); req.promise.SetResult( ReadResult{ReadResult::kValue, WithCrc32c(Bytes({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 10, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
Future<TimestampedStorageGeneration> Write(Key key, std::optional<Value> value, WriteOptions options) override; absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction, size_t& phase, Key key, ReadModifyWriteSource& source) override; absl::Status TransactionalDeleteRange( const internal::OpenTransactionPtr& transaction, KeyRange range) override; Future<const void> DeleteRange(KeyRange range) override; std::string DescribeKey(std::string_view key) override; kvstore::SupportedFeatures GetSupportedFeatures( const KeyRange& key_range) const final; Result<KvStore> GetBase(std::string_view path, const Transaction& transaction) const override; kvstore::Driver* base_kvstore_driver() const { return shard_index_cache()->base_kvstore_driver(); }
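// --- Illustrative sketch; not part of the original sources. ---
// The transactional path declared above, as exercised by the unit tests:
// writes staged against an isolated transaction are merged and committed
// atomically into a single shard. Assumes `store` and `grid_shape` were set
// up as in GetDefaultStore from the test file.
tensorstore::Transaction txn(tensorstore::isolated);
auto write_future = kvstore::WriteCommitted(
    KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a"));
txn.CommitAsync().IgnoreFuture();
// Resolves to a TimestampedStorageGeneration once the shard is rewritten.
auto stamp = write_future.result();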
TEST_F(RawEncodingTest, WritesAndDeletes) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); StorageGeneration gen1, gen2, gen3; { tensorstore::Transaction txn(tensorstore::isolated); auto init_future1 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a")); auto init_future2 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("bc")); auto init_future3 = kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(3, grid_shape), absl::Cord("def")); txn.CommitAsync().IgnoreFuture(); gen1 = init_future1.value().generation; gen2 = init_future2.value().generation; gen3 = init_future3.value().generation; } tensorstore::Transaction txn(tensorstore::isolated); auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}); auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("ww"), {gen2}); auto future3 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("xx"), {gen2}); auto future4 = kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(4, grid_shape), absl::Cord("zz"), {StorageGeneration::NoValue()}); auto future5 = kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(3, grid_shape), {gen3}); txn.CommitAsync().IgnoreFuture(); EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration( StorageGeneration::Unknown())); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read, base_kv_store->Read("shard_path").result()); EXPECT_THAT( std::vector({future2.result(), future3.result()}), ::testing::UnorderedElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()), MatchesTimestampedStorageGeneration(shard_read.stamp.generation))); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("a"))); EXPECT_THAT(store->Read(EntryIdToKey(2, grid_shape)).result(), MatchesKvsReadResult( !StorageGeneration::IsUnknown(future2.result()->generation) ? 
absl::Cord("ww") : absl::Cord("xx"))); EXPECT_THAT(store->Read(EntryIdToKey(3, grid_shape)).result(), MatchesKvsReadResultNotFound()); EXPECT_THAT(store->Read(EntryIdToKey(4, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("zz"))); } TEST_F(RawEncodingTest, WriteThenDelete) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); TENSORSTORE_ASSERT_OK( store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")).result()); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResult(absl::Cord("a"))); TENSORSTORE_ASSERT_OK(store->Delete(EntryIdToKey(1, grid_shape)).result()); EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(), MatchesKvsReadResultNotFound()); } TEST_F(RawEncodingTest, MultipleDeleteExisting) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); StorageGeneration gen; tensorstore::Transaction txn{tensorstore::no_transaction}; EXPECT_THAT( TestOrderDependentWrites( [&] { gen = store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")) .value() .generation; txn = tensorstore::Transaction(tensorstore::isolated); }, [&] { return kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {gen}); }, [&] { return kvstore::DeleteCommitted( KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}); }, [&] { txn.CommitAsync().IgnoreFuture(); }), ::testing::UnorderedElementsAre( ::testing::ElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration( StorageGeneration::NoValue())), ::testing::ElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()), MatchesTimestampedStorageGeneration( StorageGeneration::Unknown())))); } TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); tensorstore::Transaction txn{tensorstore::no_transaction}; EXPECT_THAT( TestOrderDependentWrites( [&] { store->Delete(EntryIdToKey(0, grid_shape)).value(); txn = tensorstore::Transaction(tensorstore::isolated); }, [&] { return kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(0, grid_shape), absl::Cord("a")); }, [&] { return kvstore::WriteCommitted( KvStore{store, txn}, EntryIdToKey(0, grid_shape), absl::Cord("b"), {StorageGeneration::FromString("g")}); }, [&] { txn.CommitAsync().IgnoreFuture(); }), ::testing::Each(::testing::ElementsAre( MatchesTimestampedStorageGeneration( ::testing::AllOf(::testing::Not(StorageGeneration::NoValue()), ::testing::Not(StorageGeneration::Invalid()))), MatchesTimestampedStorageGeneration(StorageGeneration::Unknown())))); } TEST_F(RawEncodingTest, MultipleDeleteNonExisting) { std::vector<Index> grid_shape{100}; kvstore::DriverPtr store = GetStore(grid_shape); tensorstore::Transaction txn(tensorstore::isolated); std::vector futures{ kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()}), kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape), {StorageGeneration::NoValue()})}; txn.CommitAsync().IgnoreFuture(); std::vector results{futures[0].result(), futures[1].result()}; EXPECT_THAT( results, ::testing::UnorderedElementsAre( MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()), MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()))); }
#include "tensorflow/core/data/service/dispatcher_state.h" #include <algorithm> #include <memory> #include <optional> #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/data/service/common.h" #include "tensorflow/core/data/service/journal.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/service_config.pb.h" namespace tensorflow { namespace data { DispatcherState::DispatcherState() : worker_index_resolver_(std::vector<std::string>{}) {} DispatcherState::DispatcherState( const experimental::DispatcherConfig& dispatcher_config) : worker_index_resolver_(dispatcher_config.worker_addresses()) {} Status DispatcherState::Apply(const Update& update) { switch (update.update_type_case()) { case Update::kRegisterDataset: RegisterDataset(update.register_dataset()); break; case Update::kRegisterWorker: RegisterWorker(update.register_worker()); break; case Update::kCreateJob: CreateJob(update.create_job()); break; case Update::kCreateIteration: CreateIteration(update.create_iteration()); break; case Update::kProduceSplit: ProduceSplit(update.produce_split()); break; case Update::kAcquireIterationClient: AcquireIterationClient(update.acquire_iteration_client()); break; case Update::kReleaseIterationClient: ReleaseIterationClient(update.release_iteration_client()); break; case Update::kGarbageCollectIteration: GarbageCollectIteration(update.garbage_collect_iteration()); break; case Update::kRemoveTask: RemoveTask(update.remove_task()); break; case Update::kCreatePendingTask: CreatePendingTask(update.create_pending_task()); break; case Update::kClientHeartbeat: ClientHeartbeat(update.client_heartbeat()); break; case Update::kCreateTask: CreateTask(update.create_task()); break; case Update::kFinishTask: FinishTask(update.finish_task()); break; case Update::kSnapshot: Snapshot(update.snapshot()); break; case Update::kCompressionDisabledAtRuntime: CompressionDisabledAtRuntime(update.compression_disabled_at_runtime()); break; case Update::UPDATE_TYPE_NOT_SET: return errors::Internal("Update type not set."); } return absl::OkStatus(); } void DispatcherState::RegisterDataset( const RegisterDatasetUpdate& register_dataset) { std::string dataset_id = register_dataset.dataset_id(); auto dataset = std::make_shared<Dataset>(dataset_id, register_dataset.metadata()); DCHECK(!datasets_by_id_.contains(dataset_id)); datasets_by_id_[dataset_id] = dataset; UpdateNextAvailableDatasetId(); } void DispatcherState::RegisterWorker( const RegisterWorkerUpdate& register_worker) { std::string address = register_worker.worker_address(); DCHECK(!workers_.contains(address)); workers_[address] = std::make_shared<Worker>(register_worker); tasks_by_worker_[address] = absl::flat_hash_map<int64_t, std::shared_ptr<Task>>(); worker_index_resolver_.AddWorker(address); } void DispatcherState::CreateJob(const CreateJobUpdate& create_job) { int64_t job_id = create_job.job_id(); std::string job_name = create_job.job_name(); std::optional<int64_t> num_consumers; if (create_job.optional_num_consumers_case() == CreateJobUpdate::kNumConsumers) { num_consumers = create_job.num_consumers(); } auto job = std::make_shared<Job>( job_id, create_job.dataset_id(), create_job.processing_mode_def(), job_name, num_consumers, 
create_job.use_cross_trainer_cache(), create_job.target_workers()); DCHECK(!jobs_by_id_.contains(job_id)); jobs_by_id_[job_id] = job; DCHECK(!jobs_by_name_.contains(job_name)); jobs_by_name_[job_name] = job; next_available_job_id_ = std::max(next_available_job_id_, job_id + 1); } Status DispatcherState::JobFromId(int64_t job_id, std::shared_ptr<const Job>& job) const { auto it = jobs_by_id_.find(job_id); if (it == jobs_by_id_.end()) { return errors::NotFound("Job with id ", job_id, " not found"); } job = it->second; return absl::OkStatus(); } Status DispatcherState::JobByName(const std::string& job_name, std::shared_ptr<const Job>& job) const { auto it = jobs_by_name_.find(job_name); if (it == jobs_by_name_.end()) { return errors::NotFound("Job with name ", job_name, " not found"); } job = it->second; return absl::OkStatus(); } void DispatcherState::CreateIteration( const CreateIterationUpdate& create_iteration) { int64_t iteration_id = create_iteration.iteration_id(); int64_t job_id = create_iteration.job_id(); DCHECK(jobs_by_id_.contains(job_id)); auto& job = jobs_by_id_[job_id]; DCHECK(job); IterationKey iteration_key(job->job_name, create_iteration.repetition()); auto iteration = std::make_shared<Iteration>( iteration_id, iteration_key, create_iteration.num_split_providers(), job); DCHECK(!iterations_.contains(iteration_id)); iterations_[iteration_id] = iteration; tasks_by_iteration_[iteration_id] = std::vector<std::shared_ptr<Task>>(); DCHECK(!iterations_by_key_.contains(iteration_key) || iterations_by_key_[iteration_key]->garbage_collected); iterations_by_key_[iteration_key] = iteration; next_available_iteration_id_ = std::max(next_available_iteration_id_, iteration_id + 1); } void DispatcherState::ProduceSplit(const ProduceSplitUpdate& produce_split) { std::shared_ptr<Iteration> iteration = iterations_[produce_split.iteration_id()]; DCHECK(iteration->distributed_epoch_state.has_value()); DistributedEpochState& state = iteration->distributed_epoch_state.value(); int64_t provider_index = produce_split.split_provider_index(); DCHECK_GE(produce_split.repetition(), state.repetitions[provider_index]); state.repetitions[provider_index] = produce_split.repetition(); if (produce_split.finished()) { state.repetitions[provider_index]++; state.indices[provider_index] = 0; return; } state.indices[provider_index]++; } void DispatcherState::AcquireIterationClient( const AcquireIterationClientUpdate& acquire_iteration_client) { int64_t iteration_client_id = acquire_iteration_client.iteration_client_id(); std::shared_ptr<Iteration>& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(!iteration); iteration = iterations_[acquire_iteration_client.iteration_id()]; DCHECK(iteration); iteration->num_clients++; next_available_iteration_client_id_ = std::max(next_available_iteration_client_id_, iteration_client_id + 1); } void DispatcherState::ReleaseIterationClient( const ReleaseIterationClientUpdate& release_iteration_client) { int64_t iteration_client_id = release_iteration_client.iteration_client_id(); std::shared_ptr<Iteration>& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(iteration); iteration->num_clients--; DCHECK_GE(iteration->num_clients, 0); iteration->last_client_released_micros = release_iteration_client.time_micros(); iterations_for_client_ids_.erase(iteration_client_id); } void DispatcherState::GarbageCollectIteration( const GarbageCollectIterationUpdate& garbage_collect_iteration) { int64_t iteration_id = garbage_collect_iteration.iteration_id(); for 
(auto& task : tasks_by_iteration_[iteration_id]) { task->finished = true; tasks_by_worker_[task->worker_address].erase(task->task_id); } iterations_[iteration_id]->finished = true; iterations_[iteration_id]->garbage_collected = true; } void DispatcherState::RemoveTask(const RemoveTaskUpdate& remove_task) { std::shared_ptr<Task>& task = tasks_[remove_task.task_id()]; DCHECK(task); task->removed = true; auto& tasks_for_iteration = tasks_by_iteration_[task->iteration->iteration_id]; for (auto it = tasks_for_iteration.begin(); it != tasks_for_iteration.end(); ++it) { if ((*it)->task_id == task->task_id) { tasks_for_iteration.erase(it); break; } } tasks_by_worker_[task->worker_address].erase(task->task_id); tasks_.erase(task->task_id); VLOG(1) << "Removed task " << remove_task.task_id() << " from worker " << task->worker_address; } void DispatcherState::CreatePendingTask( const CreatePendingTaskUpdate& create_pending_task) { int64_t task_id = create_pending_task.task_id(); auto& task = tasks_[task_id]; DCHECK_EQ(task, nullptr); auto& iteration = iterations_[create_pending_task.iteration_id()]; DCHECK_NE(iteration, nullptr); task = std::make_shared<Task>(create_pending_task, iteration); iteration->pending_tasks.emplace(task, create_pending_task.starting_round()); tasks_by_worker_[create_pending_task.worker_address()][task->task_id] = task; next_available_task_id_ = std::max(next_available_task_id_, task_id + 1); } void DispatcherState::ClientHeartbeat( const ClientHeartbeatUpdate& client_heartbeat) { int64_t iteration_client_id = client_heartbeat.iteration_client_id(); auto& iteration = iterations_for_client_ids_[iteration_client_id]; DCHECK(!iteration->pending_tasks.empty()); auto& task = iteration->pending_tasks.front(); if (client_heartbeat.has_task_rejected()) { task.failures++; task.ready_consumers.clear(); task.target_round = client_heartbeat.task_rejected().new_target_round(); } if (client_heartbeat.task_accepted()) { task.ready_consumers.insert(iteration_client_id); if (task.ready_consumers.size() == iteration->job->num_consumers.value()) { VLOG(1) << "Promoting task " << task.task->task_id << " from pending to active"; task.task->starting_round = task.target_round; tasks_by_iteration_[iteration->iteration_id].push_back(task.task); iteration->pending_tasks.pop(); } } } void DispatcherState::CreateTask(const CreateTaskUpdate& create_task) { int64_t task_id = create_task.task_id(); auto& task = tasks_[task_id]; DCHECK_EQ(task, nullptr); auto& iteration = iterations_[create_task.iteration_id()]; DCHECK_NE(iteration, nullptr); task = std::make_shared<Task>(create_task, iteration); tasks_by_iteration_[create_task.iteration_id()].push_back(task); tasks_by_worker_[create_task.worker_address()][task->task_id] = task; next_available_task_id_ = std::max(next_available_task_id_, task_id + 1); } void DispatcherState::FinishTask(const FinishTaskUpdate& finish_task) { VLOG(2) << "Marking task " << finish_task.task_id() << " as finished"; int64_t task_id = finish_task.task_id(); auto& task = tasks_[task_id]; DCHECK(task != nullptr); task->finished = true; tasks_by_worker_[task->worker_address].erase(task->task_id); bool all_finished = true; for (const auto& task_for_iteration : tasks_by_iteration_[task->iteration->iteration_id]) { if (!task_for_iteration->finished) { all_finished = false; } } VLOG(3) << "Iteration " << task->iteration->iteration_id << " finished: " << all_finished; iterations_[task->iteration->iteration_id]->finished = all_finished; } std::string 
DispatcherState::NextAvailableDatasetId() const { return absl::StrCat(next_available_dataset_id_); } void DispatcherState::UpdateNextAvailableDatasetId() { while (datasets_by_id_.contains(absl::StrCat(next_available_dataset_id_))) { ++next_available_dataset_id_; } } Status DispatcherState::DatasetFromId( const std::string& id, std::shared_ptr<const Dataset>& dataset) const { auto it = datasets_by_id_.find(id); if (it == datasets_by_id_.end()) { return errors::NotFound("Dataset id ", id, " not found"); } dataset = it->second; return absl::OkStatus(); } Status DispatcherState::WorkerFromAddress( const std::string& address, std::shared_ptr<const Worker>& worker) const { auto it = workers_.find(address); if (it == workers_.end()) { return errors::NotFound("Worker with address ", address, " not found."); } worker = it->second; return absl::OkStatus(); } std::vector<std::shared_ptr<const DispatcherState::Worker>> DispatcherState::ListWorkers() const { std::vector<std::shared_ptr<const Worker>> workers; workers.reserve(workers_.size()); for (const auto& it : workers_) { workers.push_back(it.second); } return workers; } std::vector<std::shared_ptr<const DispatcherState::Iteration>> DispatcherState::ListIterations() const { std::vector<std::shared_ptr<const DispatcherState::Iteration>> iterations; iterations.reserve(iterations_.size()); for (const auto& it : iterations_) { iterations.push_back(it.second); } return iterations; } Status DispatcherState::IterationFromId( int64_t id, std::shared_ptr<const Iteration>& iteration) const { auto it = iterations_.find(id); if (it == iterations_.end()) { return errors::NotFound("Iteration id ", id, " not found"); } iteration = it->second; return absl::OkStatus(); } Status DispatcherState::IterationByKey( IterationKey iteration_key, std::shared_ptr<const Iteration>& iteration) const { auto it = iterations_by_key_.find(iteration_key); if (it == iterations_by_key_.end()) { return errors::NotFound("Iteration key ", iteration_key.DebugString(), " not found"); } iteration = it->second; return absl::OkStatus(); } int64_t DispatcherState::NextAvailableJobId() const { return next_available_job_id_; } int64_t DispatcherState::NextAvailableIterationId() const { return next_available_iteration_id_; } Status DispatcherState::IterationForIterationClientId( int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration) { iteration = iterations_for_client_ids_[iteration_client_id]; if (!iteration) { return errors::NotFound("Iteration client id not found: ", iteration_client_id); } return absl::OkStatus(); } std::vector<int64_t> DispatcherState::ListActiveClientIds() { std::vector<int64_t> ids; for (const auto& it : iterations_for_client_ids_) { if (it.second && !it.second->finished) { ids.push_back(it.first); } } return ids; } int64_t DispatcherState::NextAvailableIterationClientId() const { return next_available_iteration_client_id_; } Status DispatcherState::TaskFromId(int64_t id, std::shared_ptr<const Task>& task) const { auto it = tasks_.find(id); if (it == tasks_.end()) { return errors::NotFound("Task ", id, " not found"); } task = it->second; return absl::OkStatus(); } Status DispatcherState::TasksForIteration( int64_t iteration_id, std::vector<std::shared_ptr<const Task>>& tasks) const { auto it = tasks_by_iteration_.find(iteration_id); if (it == tasks_by_iteration_.end()) { return errors::NotFound("Iteration ", iteration_id, " not found"); } tasks.clear(); tasks.reserve(it->second.size()); for (const auto& task : it->second) { tasks.push_back(task); } 
return absl::OkStatus(); } Status DispatcherState::TasksForWorker( absl::string_view worker_address, std::vector<std::shared_ptr<const Task>>& tasks) const { tasks.clear(); auto it = tasks_by_worker_.find(worker_address); if (it == tasks_by_worker_.end()) { return errors::NotFound("Worker ", worker_address, " not found"); } const absl::flat_hash_map<int64_t, std::shared_ptr<Task>>& worker_tasks = it->second; tasks.reserve(worker_tasks.size()); for (const auto& task : worker_tasks) { tasks.push_back(task.second); } return absl::OkStatus(); } int64_t DispatcherState::NextAvailableTaskId() const { return next_available_task_id_; } Status DispatcherState::ValidateWorker(absl::string_view worker_address) const { return worker_index_resolver_.ValidateWorker(worker_address); } absl::StatusOr<int64_t> DispatcherState::GetWorkerIndex( absl::string_view worker_address) const { return worker_index_resolver_.GetWorkerIndex(worker_address); } void DispatcherState::Snapshot(const SnapshotUpdate& snapshot) { snapshot_paths_.insert(snapshot.path()); } void DispatcherState::CompressionDisabledAtRuntime( const CompressionDisabledAtRuntimeUpdate& compression_disabled_at_runtime) { compression_disabled_at_runtime_.insert({ compression_disabled_at_runtime.dataset_id(), compression_disabled_at_runtime.compression_disabled(), }); } std::optional<bool> DispatcherState::CompressionDisabledAtRuntime( const std::string& dataset_id) const { if (auto it = compression_disabled_at_runtime_.find(dataset_id); it != compression_disabled_at_runtime_.end()) { return it->second; } return std::nullopt; } } }
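Since every mutation above funnels through Apply(), a minimal sketch of driving DispatcherState with a journal update may help. It mirrors the helper functions in the test file that follows; the name RegisterExampleDataset is ours, for illustration only.

Status RegisterExampleDataset(DispatcherState& state) {
  // Build the journal record; Apply() routes it to RegisterDataset()
  // via the update_type_case() switch shown above.
  Update update;
  RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
  register_dataset->set_dataset_id(state.NextAvailableDatasetId());
  return state.Apply(update);
}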
#include "tensorflow/core/data/service/dispatcher_state.h" #include <cstdint> #include <memory> #include <string> #include <vector> #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/platform/random.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/service_config.pb.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace data { namespace { using Dataset = DispatcherState::Dataset; using Worker = DispatcherState::Worker; using IterationKey = DispatcherState::IterationKey; using Job = DispatcherState::Job; using Iteration = DispatcherState::Iteration; using Task = DispatcherState::Task; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; using ::tsl::testing::StatusIs; Status RegisterDataset(const std::string& dataset_id, DispatcherState& state) { Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); return state.Apply(update); } Status RegisterWorker(std::string worker_address, DispatcherState& state) { Update update; update.mutable_register_worker()->set_worker_address(worker_address); return state.Apply(update); } Status CreateJob(int64_t job_id, const std::string& dataset_id, const std::string& job_name, DispatcherState& state) { Update update; CreateJobUpdate* create_job = update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_job_name(job_name); return state.Apply(update); } Status CreateIteration(int64_t iteration_id, const std::string& dataset_id, const IterationKey& named_iteration_key, DispatcherState& state) { int64_t job_id = state.NextAvailableJobId(); TF_RETURN_IF_ERROR( CreateJob(job_id, dataset_id, named_iteration_key.name, state)); Update update; CreateIterationUpdate* create_iteration = update.mutable_create_iteration(); create_iteration->set_job_id(job_id); create_iteration->set_iteration_id(iteration_id); create_iteration->set_repetition(named_iteration_key.repetition); return state.Apply(update); } Status CreateIteration(int64_t iteration_id, const std::string& dataset_id, DispatcherState& state) { IterationKey key(absl::StrCat(random::New64()), 0); return CreateIteration(iteration_id, dataset_id, key, state); } Status AcquireIterationClientId(int64_t iteration_id, int64_t iteration_client_id, DispatcherState& state) { Update update; AcquireIterationClientUpdate* acquire_iteration_client = update.mutable_acquire_iteration_client(); acquire_iteration_client->set_iteration_id(iteration_id); acquire_iteration_client->set_iteration_client_id(iteration_client_id); return state.Apply(update); } Status ReleaseIterationClientId(int64_t iteration_client_id, int64_t release_time, DispatcherState& state) { Update update; ReleaseIterationClientUpdate* release_iteration_client = update.mutable_release_iteration_client(); release_iteration_client->set_iteration_client_id(iteration_client_id); release_iteration_client->set_time_micros(release_time); return state.Apply(update); } Status CreateTask(int64_t task_id, int64_t iteration_id, const std::string& worker_address, DispatcherState& state) { Update update; CreateTaskUpdate* create_task = update.mutable_create_task(); 
create_task->set_task_id(task_id); create_task->set_iteration_id(iteration_id); create_task->set_worker_address(worker_address); return state.Apply(update); } Status FinishTask(int64_t task_id, DispatcherState& state) { Update update; FinishTaskUpdate* finish_task = update.mutable_finish_task(); finish_task->set_task_id(task_id); return state.Apply(update); } Status Snapshot(const std::string& path, DispatcherState& state) { Update update; SnapshotUpdate* snapshot = update.mutable_snapshot(); snapshot->set_path(path); return state.Apply(update); } } TEST(DispatcherState, RegisterDataset) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t dataset_id_int; ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int)); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_TRUE(dataset->metadata.element_spec().empty()); EXPECT_EQ(dataset->metadata.compression(), DataServiceMetadata::COMPRESSION_UNSPECIFIED); } TEST(DispatcherState, RegisterDatasetWithExplicitID) { DispatcherState state; TF_EXPECT_OK(RegisterDataset("dataset_id", state)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId("dataset_id", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id"); } TEST(DispatcherState, RegisterDatasetsWithDifferentIDs) { DispatcherState state; TF_EXPECT_OK(RegisterDataset("dataset_id1", state)); TF_EXPECT_OK(RegisterDataset("dataset_id2", state)); std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId("dataset_id1", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id1"); TF_EXPECT_OK(state.DatasetFromId("dataset_id2", dataset)); EXPECT_EQ(dataset->dataset_id, "dataset_id2"); } TEST(DispatcherState, RegisterDatasetCompression) { DispatcherState state; const std::string dataset_id = state.NextAvailableDatasetId(); Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); register_dataset->mutable_metadata()->set_compression( DataServiceMetadata::COMPRESSION_SNAPPY); TF_ASSERT_OK(state.Apply(update)); { std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_EQ(dataset->metadata.compression(), DataServiceMetadata::COMPRESSION_SNAPPY); } } TEST(DispatcherState, RegisterDatasetElementSpec) { DispatcherState state; const std::string dataset_id = state.NextAvailableDatasetId(); Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id(dataset_id); register_dataset->mutable_metadata()->set_element_spec( "encoded_element_spec"); TF_ASSERT_OK(state.Apply(update)); { std::shared_ptr<const Dataset> dataset; TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset)); EXPECT_EQ(dataset->metadata.element_spec(), "encoded_element_spec"); } } TEST(DispatcherState, MissingDatasetId) { DispatcherState state; std::shared_ptr<const Dataset> dataset; Status s = state.DatasetFromId("missing_dataset_id", dataset); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, NextAvailableDatasetId) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t dataset_id_int; ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int)); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); EXPECT_NE(state.NextAvailableDatasetId(), dataset_id); 
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1)); EXPECT_EQ(state.NextAvailableDatasetId(), state.NextAvailableDatasetId()); } TEST(DispatcherState, RegisterWorker) { DispatcherState state; std::string address = "test_worker_address"; TF_EXPECT_OK(RegisterWorker(address, state)); std::shared_ptr<const Worker> worker; TF_EXPECT_OK(state.WorkerFromAddress(address, worker)); EXPECT_EQ(worker->address, address); } TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) { experimental::DispatcherConfig config; config.add_worker_addresses("/worker/task/0"); config.add_worker_addresses("/worker/task/1"); config.add_worker_addresses("/worker/task/2"); DispatcherState state(config); TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000")); TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000")); TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000")); TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state)); TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state)); TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state)); std::shared_ptr<const Worker> worker; TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker)); EXPECT_EQ(worker->address, "/worker/task/0:20000"); } TEST(DispatcherState, RegisterInvalidWorkerInFixedWorkerSet) { experimental::DispatcherConfig config; config.add_worker_addresses("/worker/task/0"); config.add_worker_addresses("/worker/task/1"); config.add_worker_addresses("/worker/task/2"); DispatcherState state(config); EXPECT_THAT(state.ValidateWorker("localhost:20000"), StatusIs(error::FAILED_PRECONDITION, HasSubstr("The worker's address is not configured"))); TF_EXPECT_OK(RegisterWorker("localhost:20000", state)); std::shared_ptr<const Worker> worker; EXPECT_THAT(state.WorkerFromAddress("/worker/task/0:20000", worker), StatusIs(error::NOT_FOUND, "Worker with address /worker/task/0:20000 not found.")); } TEST(DispatcherState, ListWorkers) { DispatcherState state; std::string address_1 = "address_1"; std::string address_2 = "address_2"; { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, IsEmpty()); } TF_EXPECT_OK(RegisterWorker(address_1, state)); { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, SizeIs(1)); } TF_EXPECT_OK(RegisterWorker(address_2, state)); { std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers(); EXPECT_THAT(workers, SizeIs(2)); } } TEST(DispatcherState, MissingWorker) { DispatcherState state; std::shared_ptr<const Worker> worker; Status s = state.WorkerFromAddress("test_worker_address", worker); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, UnknownUpdate) { DispatcherState state; Update update; Status s = state.Apply(update); EXPECT_EQ(s.code(), error::INTERNAL); } TEST(DispatcherState, JobName) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t job_id = state.NextAvailableJobId(); std::string job_name = "test_name"; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state)); std::shared_ptr<const Job> job; TF_EXPECT_OK(state.JobByName(job_name, job)); EXPECT_EQ(state.NextAvailableJobId(), job_id + 1); EXPECT_EQ(job->dataset_id, dataset_id); EXPECT_FALSE(job->use_cross_trainer_cache); } TEST(DispatcherState, JobData) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); int64_t job_id = state.NextAvailableJobId(); int64_t num_consumers = 8; bool 
use_cross_trainer_cache = true; TF_ASSERT_OK(RegisterDataset(dataset_id, state)); Update update; CreateJobUpdate* create_job = update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_num_consumers(num_consumers); create_job->set_use_cross_trainer_cache(use_cross_trainer_cache); TF_ASSERT_OK(state.Apply(update)); std::shared_ptr<const Job> job; TF_ASSERT_OK(state.JobFromId(job_id, job)); EXPECT_EQ(job->num_consumers, num_consumers); EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache); } TEST(DispatcherState, CrossTrainerCacheTask) { DispatcherState state; std::string dataset_id = state.NextAvailableDatasetId(); std::string worker_address = "test_worker_address"; TF_ASSERT_OK(RegisterDataset(dataset_id, state)); int64_t job_id = state.NextAvailableJobId(); Update job_update; CreateJobUpdate* create_job = job_update.mutable_create_job(); create_job->set_job_id(job_id); create_job->set_dataset_id(dataset_id); create_job->set_use_cross_trainer_cache(true); TF_ASSERT_OK(state.Apply(job_update)); int64_t iteration_id = state.NextAvailableIterationId(); Update iteration_update; CreateIterationUpdate* create_iteration = iteration_update.mutable_create_iteration(); create_iteration->set_job_id(job_id); create_iteration->set_iteration_id(iteration_id); TF_ASSERT_OK(state.Apply(iteration_update)); int64_t task_id = state.NextAvailableTaskId(); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_EQ(task->iteration->iteration_id, iteration_id); EXPECT_EQ(task->task_id, task_id); EXPECT_EQ(task->worker_address, worker_address); EXPECT_TRUE(task->iteration->job->use_cross_trainer_cache); } TEST(DispatcherState, CreateTask) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; std::string worker_address = "test_worker_address"; DispatcherState state; int64_t task_id = state.NextAvailableTaskId(); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); EXPECT_EQ(state.NextAvailableTaskId(), task_id + 1); { std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_EQ(task->iteration->iteration_id, iteration_id); EXPECT_EQ(task->task_id, task_id); EXPECT_EQ(task->worker_address, worker_address); EXPECT_FALSE(task->iteration->job->use_cross_trainer_cache); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(1, tasks.size()); } } TEST(DispatcherState, CreateTasksForSameIteration) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks)); EXPECT_THAT(tasks, SizeIs(2)); } } TEST(DispatcherState, CreateTasksForDifferentIterations) { std::string dataset_id = 
"dataset_id"; int64_t iteration_id_1 = 3; int64_t iteration_id_2 = 4; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id_1, dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id_2, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id_1, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id_2, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id_1, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForIteration(iteration_id_2, tasks)); EXPECT_THAT(tasks, SizeIs(1)); } } TEST(DispatcherState, CreateTasksForSameWorker) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(2, tasks.size()); } } TEST(DispatcherState, CreateTasksForDifferentWorkers) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 8; int64_t task_id_2 = 9; std::string worker_address_1 = "test_worker_address_1"; std::string worker_address_2 = "test_worker_address_2"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address_1, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address_2, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address_1, tasks)); EXPECT_EQ(1, tasks.size()); } { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address_2, tasks)); EXPECT_EQ(1, tasks.size()); } } TEST(DispatcherState, GetTasksForWorkerEmpty) { std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterWorker(worker_address, state)); { std::vector<std::shared_ptr<const Task>> tasks; TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks)); EXPECT_EQ(0, tasks.size()); } } TEST(DispatcherState, FinishTask) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id = 4; std::string worker_address = "test_worker_address"; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state)); TF_EXPECT_OK(FinishTask(task_id, state)); std::shared_ptr<const Task> task; TF_EXPECT_OK(state.TaskFromId(task_id, task)); EXPECT_TRUE(task->finished); std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_TRUE(iteration->finished); } TEST(DispatcherState, FinishMultiTaskIteration) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t task_id_1 = 4; int64_t task_id_2 = 5; std::string worker_address = "test_worker_address"; DispatcherState state; 
TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state)); TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state)); TF_EXPECT_OK(FinishTask(task_id_1, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_FALSE(iteration->finished); } TF_EXPECT_OK(FinishTask(task_id_2, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_TRUE(iteration->finished); } } TEST(DispatcherState, AcquireIterationClientId) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id_1 = 1; int64_t iteration_client_id_2 = 2; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_1, state)); { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_EQ(iteration->num_clients, 1); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_2, state)); EXPECT_EQ(iteration->num_clients, 2); } { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK( state.IterationForIterationClientId(iteration_client_id_1, iteration)); EXPECT_EQ(iteration->iteration_id, iteration_id); } { std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK( state.IterationForIterationClientId(iteration_client_id_2, iteration)); EXPECT_EQ(iteration->iteration_id, iteration_id); } } TEST(DispatcherState, ReleaseIterationClientId) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id = 6; int64_t release_time = 100; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id, release_time, state)); std::shared_ptr<const Iteration> iteration; TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration)); EXPECT_EQ(iteration->num_clients, 0); Status s = state.IterationForIterationClientId(iteration_client_id, iteration); EXPECT_EQ(s.code(), error::NOT_FOUND); } TEST(DispatcherState, ListActiveClientsEmpty) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id = 6; int64_t release_time = 100; DispatcherState state; EXPECT_THAT(state.ListActiveClientIds(), IsEmpty()); TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id, release_time, state)); EXPECT_THAT(state.ListActiveClientIds(), IsEmpty()); } TEST(DispatcherState, ListActiveClients) { std::string dataset_id = "dataset_id"; int64_t iteration_id = 3; int64_t iteration_client_id_1 = 6; int64_t iteration_client_id_2 = 7; int64_t iteration_client_id_3 = 8; int64_t release_time = 100; DispatcherState state; TF_EXPECT_OK(RegisterDataset(dataset_id, state)); TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_1, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, 
iteration_client_id_2, state)); TF_EXPECT_OK( ReleaseIterationClientId(iteration_client_id_2, release_time, state)); TF_EXPECT_OK( AcquireIterationClientId(iteration_id, iteration_client_id_3, state)); EXPECT_THAT(state.ListActiveClientIds(), UnorderedElementsAre(6, 8)); } TEST(DispatcherState, ListSnapshotPaths) { DispatcherState state; absl::flat_hash_set<std::string> snapshot_paths = {"p1", "p2"}; for (const auto& snapshot_path : snapshot_paths) { TF_EXPECT_OK(Snapshot(snapshot_path, state)); } EXPECT_EQ(state.ListSnapshotPaths(), snapshot_paths); } TEST(DispatcherState, GetNumberOfRegisteredWorkers) { DispatcherState state; std::string address_1 = "address_1"; std::string address_2 = "address_2"; EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 0); TF_EXPECT_OK(RegisterWorker(address_1, state)); EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 1); TF_EXPECT_OK(RegisterWorker(address_2, state)); EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 2); } } }
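As a usage note on the fixed-worker-set tests above, the sketch below isolates the configuration path. The port-insensitive matching it relies on is exactly what RegisterWorkerInFixedWorkerSet demonstrates: a configured address without a port accepts the same host on any port.

experimental::DispatcherConfig config;
config.add_worker_addresses("/worker/task/0");
DispatcherState state(config);
// The configured address carries no port, so "/worker/task/0:20000"
// validates, while an unlisted host fails with FAILED_PRECONDITION.
TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000"));
EXPECT_THAT(state.ValidateWorker("localhost:20000"),
            StatusIs(error::FAILED_PRECONDITION,
                     HasSubstr("The worker's address is not configured")));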
void DispatcherState::RegisterWorker(
    const RegisterWorkerUpdate& register_worker) {
  std::string address = register_worker.worker_address();
  DCHECK(!workers_.contains(address));
  workers_[address] = std::make_shared<Worker>(register_worker);
  tasks_by_worker_[address] =
      absl::flat_hash_map<int64_t, std::shared_ptr<Task>>();
  worker_index_resolver_.AddWorker(address);
}
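For context, this member is only reached through DispatcherState::Apply(). A minimal sketch of the corresponding journal update, following the same pattern as the RegisterWorker helper in the tests, looks like this:

DispatcherState state;
Update update;
update.mutable_register_worker()->set_worker_address("test_worker_address");
// Apply() dispatches on Update::kRegisterWorker and lands in the
// RegisterWorker() member shown above.
TF_EXPECT_OK(state.Apply(update));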
TEST(DispatcherState, RegisterWorker) {
  DispatcherState state;
  std::string address = "test_worker_address";
  TF_EXPECT_OK(RegisterWorker(address, state));
  std::shared_ptr<const Worker> worker;
  TF_EXPECT_OK(state.WorkerFromAddress(address, worker));
  EXPECT_EQ(worker->address, address);
}

TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) {
  experimental::DispatcherConfig config;
  config.add_worker_addresses("/worker/task/0");
  config.add_worker_addresses("/worker/task/1");
  config.add_worker_addresses("/worker/task/2");
  DispatcherState state(config);
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000"));
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000"));
  TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000"));
  TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state));
  TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state));
  TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state));
  std::shared_ptr<const Worker> worker;
  TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker));
  EXPECT_EQ(worker->address, "/worker/task/0:20000");
}

TEST(DispatcherState, ListWorkers) {
  DispatcherState state;
  std::string address_1 = "address_1";
  std::string address_2 = "address_2";
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, IsEmpty());
  }
  TF_EXPECT_OK(RegisterWorker(address_1, state));
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, SizeIs(1));
  }
  TF_EXPECT_OK(RegisterWorker(address_2, state));
  {
    std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
    EXPECT_THAT(workers, SizeIs(2));
  }
}
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; namespace { Status MakeBatchSquareMatrix(InferenceContext* c, ShapeHandle input, ShapeHandle* out) { ShapeHandle s; TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, 2, &s)); DimensionHandle d; TF_RETURN_IF_ERROR(c->Merge(c->Dim(s, -2), c->Dim(s, -1), &d)); ShapeHandle batch_shape; TF_RETURN_IF_ERROR(c->Subshape(s, 0, -2, &batch_shape)); TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(d, d), out)); return absl::OkStatus(); } Status BatchUnchangedSquareShapeFn(InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &out)); c->set_output(0, out); return absl::OkStatus(); } Status BandedTriangularSolveShapeFn(InferenceContext* c) { ShapeHandle lhs; ShapeHandle rhs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs)); DimensionHandle num_bands = c->Dim(lhs, -2); DimensionHandle m = c->Dim(lhs, -1); if (c->ValueKnown(num_bands) && c->Value(num_bands) <= 0) { return errors::InvalidArgument("Number of bands must be positive, but is ", c->Value(num_bands)); } if (c->ValueKnown(num_bands) && c->ValueKnown(m) && c->Value(num_bands) > c->Value(m)) { return errors::InvalidArgument("Number of bands ", c->Value(num_bands), " cannot exceed the size of the matrix ", c->Value(m)); } ShapeHandle lhs_batch_shape; ShapeHandle rhs_batch_shape; ShapeHandle output_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape)); TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper( c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape)); TF_RETURN_IF_ERROR(c->Merge(m, c->Dim(rhs, -2), &m)); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out)); c->set_output(0, out); return absl::OkStatus(); } Status MatrixSolveShapeFn(InferenceContext* c, bool square) { ShapeHandle lhs; ShapeHandle rhs; if (square) { TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs)); } else { TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs)); } TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs)); ShapeHandle lhs_batch_shape; ShapeHandle rhs_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape)); TF_RETURN_IF_ERROR( c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape)); DimensionHandle m; TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -2), c->Dim(rhs, -2), &m)); DimensionHandle n = c->Dim(lhs, -1); if (square) { TF_RETURN_IF_ERROR(c->Merge(m, n, &n)); } ShapeHandle out; TF_RETURN_IF_ERROR(c->Concatenate(lhs_batch_shape, c->Vector(n), &out)); TF_RETURN_IF_ERROR(c->Concatenate(out, c->Vector(c->Dim(rhs, -1)), &out)); c->set_output(0, out); return absl::OkStatus(); } Status MatrixTriangularSolveShapeFn(InferenceContext* c) { ShapeHandle lhs; ShapeHandle rhs; TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs)); ShapeHandle lhs_batch_shape; ShapeHandle rhs_batch_shape; ShapeHandle output_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, 
&rhs_batch_shape)); TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper( c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape)); DimensionHandle m; TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -1), c->Dim(rhs, -2), &m)); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out)); c->set_output(0, out); return absl::OkStatus(); } Status SelfAdjointEigV2ShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input)); DimensionHandle n; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n)); ShapeHandle batch_shape; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape)); ShapeHandle e_shape; TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &e_shape)); c->set_output(0, e_shape); bool compute_v; TF_RETURN_IF_ERROR(c->GetAttr("compute_v", &compute_v)); if (compute_v) { ShapeHandle v_shape; TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape)); c->set_output(1, v_shape); } else { c->set_output(1, c->Vector(0ll)); } return absl::OkStatus(); } Status LuShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); DimensionHandle n; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n)); ShapeHandle batch_shape; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape)); ShapeHandle lu_shape; ShapeHandle p_shape; TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &lu_shape)); TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &p_shape)); c->set_output(0, lu_shape); c->set_output(1, p_shape); return absl::OkStatus(); } Status QrShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); DimensionHandle m = c->Dim(input, -2); DimensionHandle n = c->Dim(input, -1); DimensionHandle p; TF_RETURN_IF_ERROR(c->Min(m, n, &p)); ShapeHandle batch_shape; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape)); ShapeHandle q_shape; ShapeHandle r_shape; bool full_matrices; TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices)); if (full_matrices) { TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, m), &q_shape)); TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, n), &r_shape)); } else { TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, p), &q_shape)); TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(p, n), &r_shape)); } c->set_output(0, q_shape); c->set_output(1, r_shape); return absl::OkStatus(); } Status SvdShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); DimensionHandle m = c->Dim(input, -2); DimensionHandle n = c->Dim(input, -1); DimensionHandle p; TF_RETURN_IF_ERROR(c->Min(m, n, &p)); ShapeHandle batch_shape; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape)); ShapeHandle e_shape; TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(p), &e_shape)); c->set_output(0, e_shape); bool compute_uv; TF_RETURN_IF_ERROR(c->GetAttr("compute_uv", &compute_uv)); if (compute_uv) { ShapeHandle u_shape; ShapeHandle v_shape; bool full_matrices; TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices)); if (full_matrices) { TF_RETURN_IF_ERROR( c->Concatenate(batch_shape, c->Matrix(m, m), &u_shape)); TF_RETURN_IF_ERROR( c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape)); } else { TF_RETURN_IF_ERROR( c->Concatenate(batch_shape, c->Matrix(m, p), &u_shape)); TF_RETURN_IF_ERROR( 
c->Concatenate(batch_shape, c->Matrix(n, p), &v_shape)); } c->set_output(1, u_shape); c->set_output(2, v_shape); } else { c->set_output(1, c->Vector(0ll)); c->set_output(2, c->Vector(0ll)); } return absl::OkStatus(); } Status TridiagonalMatMulShapeFn(InferenceContext* c) { ShapeHandle superdiag; ShapeHandle maindiag; ShapeHandle subdiag; ShapeHandle rhs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &superdiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &maindiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 2, &subdiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(3), 2, &rhs)); ShapeHandle superdiag_batch_shape; ShapeHandle maindiag_batch_shape; ShapeHandle subdiag_batch_shape; ShapeHandle rhs_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(superdiag, 0, -2, &superdiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(maindiag, 0, -2, &maindiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(subdiag, 0, -2, &subdiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape)); TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &superdiag)); TF_RETURN_IF_ERROR( c->Merge(maindiag_batch_shape, rhs_batch_shape, &rhs_batch_shape)); TF_RETURN_IF_ERROR( c->Merge(subdiag_batch_shape, rhs_batch_shape, &rhs_batch_shape)); TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &maindiag)); TF_RETURN_IF_ERROR(c->Merge(subdiag, maindiag, &maindiag)); DimensionHandle m_lhs = c->Dim(maindiag, -1); DimensionHandle m_rhs = c->Dim(rhs, -2); TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(maindiag, -2), 1, &unused)); c->set_output(0, rhs); return absl::OkStatus(); } Status TridiagonalSolveShapeFn(InferenceContext* c) { ShapeHandle lhs; ShapeHandle rhs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs)); ShapeHandle lhs_batch_shape; ShapeHandle rhs_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape)); TF_RETURN_IF_ERROR( c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape)); DimensionHandle m_lhs = c->Dim(lhs, -1); DimensionHandle m_rhs = c->Dim(rhs, -2); TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs)); TF_RETURN_IF_ERROR(c->WithValue(c->Dim(lhs, -2), 3, &m_lhs)); c->set_output(0, rhs); return absl::OkStatus(); } } REGISTER_OP("MatrixDeterminant") .Input("input: T") .Output("output: T") .Attr("T: {half, float, double, complex64, complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); DimensionHandle unused; TF_RETURN_IF_ERROR( c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused)); ShapeHandle out; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("LogMatrixDeterminant") .Input("input: T") .Output("sign: T") .Output("log_abs_determinant: T") .Attr("T: {half, float, double, complex64, complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); DimensionHandle unused; TF_RETURN_IF_ERROR( c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused)); ShapeHandle s; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s)); c->set_output(0, s); ShapeHandle out; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out)); c->set_output(1, out); return absl::OkStatus(); }); REGISTER_OP("MatrixInverse") .Input("input: T") .Output("output: T") 
.Attr("adjoint: bool = False") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("MatrixExponential") .Deprecated( 27, "Use Python implementation tf.linalg.matrix_exponential instead.") .Input("input: T") .Output("output: T") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("MatrixLogarithm") .Input("input: T") .Output("output: T") .Attr("T: {complex64, complex128}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("Cholesky") .Input("input: T") .Output("output: T") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("CholeskyGrad") .Input("l: T") .Input("grad: T") .Output("output: T") .Attr("T: {half, float, double}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("SelfAdjointEig") .Input("input: T") .Output("output: T") .Attr("T: {double, float, half}") .Deprecated(11, "Use SelfAdjointEigV2 instead.") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input)); DimensionHandle d = c->Dim(input, -1); DimensionHandle d_plus_1; TF_RETURN_IF_ERROR(c->Add(d, 1, &d_plus_1)); ShapeHandle s; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s)); TF_RETURN_IF_ERROR(c->Concatenate(s, c->Matrix(d_plus_1, d), &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("Eig") .Input("input: T") .Output("e: Tout") .Output("v: Tout") .Attr("compute_v: bool = True") .Attr("T: {float, double, complex64, complex128}") .Attr("Tout: {complex64, complex128}") .SetShapeFn(SelfAdjointEigV2ShapeFn); REGISTER_OP("SelfAdjointEigV2") .Input("input: T") .Output("e: T") .Output("v: T") .Attr("compute_v: bool = True") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(SelfAdjointEigV2ShapeFn); REGISTER_OP("Lu") .Input("input: T") .Output("lu: T") .Output("p: output_idx_type") .Attr("T: {double, float, half, complex64, complex128}") .Attr("output_idx_type: {int32, int64} = DT_INT32") .SetShapeFn(LuShapeFn); REGISTER_OP("MatrixSolve") .Input("matrix: T") .Input("rhs: T") .Output("output: T") .Attr("adjoint: bool = False") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn([](InferenceContext* c) { return MatrixSolveShapeFn(c, true ); }); REGISTER_OP("BandedTriangularSolve") .Input("matrix: T") .Input("rhs: T") .Output("output: T") .Attr("lower: bool = True") .Attr("adjoint: bool = False") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn([](InferenceContext* c) { return BandedTriangularSolveShapeFn(c); }); REGISTER_OP("MatrixTriangularSolve") .Input("matrix: T") .Input("rhs: T") .Output("output: T") .Attr("lower: bool = True") .Attr("adjoint: bool = False") .Attr("T: {bfloat16, double, float, half, complex64, complex128}") .SetShapeFn([](InferenceContext* c) { return MatrixTriangularSolveShapeFn(c); }); REGISTER_OP("MatrixSolveLs") .Input("matrix: T") .Input("rhs: T") .Input("l2_regularizer: double") .Output("output: T") .Attr("T: {double, float, half, complex64, complex128}") .Attr("fast: bool = True") .SetShapeFn([](InferenceContext* c) { ShapeHandle l2_regularizer; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &l2_regularizer)); return MatrixSolveShapeFn(c, false ); }); REGISTER_OP("MatrixSquareRoot") .Input("input: T") .Output("output: T") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(BatchUnchangedSquareShapeFn); REGISTER_OP("Qr") .Input("input: T") .Output("q: T") .Output("r: T") 
.Attr("full_matrices: bool = False") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(QrShapeFn); REGISTER_OP("Svd") .Input("input: T") .Output("s: T") .Output("u: T") .Output("v: T") .Attr("compute_uv: bool = True") .Attr("full_matrices: bool = False") .Attr("T: {double, float, half, complex64, complex128}") .SetShapeFn(SvdShapeFn); REGISTER_OP("TridiagonalMatMul") .Input("superdiag: T") .Input("maindiag: T") .Input("subdiag: T") .Input("rhs: T") .Output("output: T") .Attr("T: {double, float, complex64, complex128}") .SetShapeFn(TridiagonalMatMulShapeFn); REGISTER_OP("TridiagonalSolve") .Input("diagonals: T") .Input("rhs: T") .Output("output: T") .Attr("partial_pivoting: bool = True") .Attr("perturb_singular: bool = False") .Attr("T: {double, float, complex64, complex128}") .SetShapeFn(TridiagonalSolveShapeFn); REGISTER_OP("Einsum") .Input("inputs: N * T") .Output("output: T") .Attr("equation: string") .Attr("N: int >= 1") .Attr("T: type") .SetShapeFn(shape_inference::EinsumShape); REGISTER_OP("BatchSelfAdjointEig") .Input("input: T") .Output("output: T") .Attr("T: {double, float}") .Deprecated(11, "Use SelfAdjointEigV2 instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixDeterminant") .Input("input: T") .Output("output: T") .Attr("T: {float, double, complex64, complex128}") .Deprecated(13, "Use MatrixDeterminant instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixInverse") .Input("input: T") .Output("output: T") .Attr("adjoint: bool = False") .Attr("T: {double, float}") .Deprecated(13, "Use MatrixInverse instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchCholesky") .Input("input: T") .Output("output: T") .Attr("T: {double, float}") .Deprecated(13, "Use Cholesky instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchCholeskyGrad") .Input("l: T") .Input("grad: T") .Output("output: T") .Attr("T: {float, double}") .Deprecated(13, "Use CholeskyGrad instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchSelfAdjointEigV2") .Input("input: T") .Output("e: T") .Output("v: T") .Attr("compute_v: bool = True") .Attr("T: {double, float}") .Deprecated(13, "Use SelfAdjointEigV2 instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixSolve") .Input("matrix: T") .Input("rhs: T") .Output("output: T") .Attr("adjoint: bool = False") .Attr("T: {double, float}") .Deprecated(13, "Use MatrixSolve instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixTriangularSolve") .Input("matrix: T") .Input("rhs: T") .Output("output: T") .Attr("lower: bool = True") .Attr("adjoint: bool = False") .Attr("T: {double, float}") .Deprecated(13, "Use MatrixTriangularSolve instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixSolveLs") .Input("matrix: T") .Input("rhs: T") .Input("l2_regularizer: double") .Output("output: T") .Attr("T: {double, float}") .Attr("fast: bool = True") .Deprecated(13, "Use MatrixSolveLs instead.") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchSvd") .Input("input: T") .Output("s: T") .Output("u: T") .Output("v: T") .Attr("compute_uv: bool = True") .Attr("full_matrices: bool = False") .Attr("T: {double, float, complex64, complex128}") .Deprecated(13, "Use Svd instead.") .SetShapeFn(shape_inference::UnknownShape); }
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(LinalgOpsTest, MatrixDeterminant_ShapeFn) { ShapeInferenceTestOp op("MatrixDeterminant"); INFER_OK(op, "?", "?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "[1,?,3,4,1,2]"); INFER_OK(op, "[?,?]", "[]"); INFER_OK(op, "[1,?]", "[]"); INFER_OK(op, "[?,1]", "[]"); INFER_OK(op, "[1,?,3,4,?,?]", "[d0_0,d0_1,d0_2,d0_3]"); INFER_OK(op, "[1,?,3,4,1,?]", "[d0_0,d0_1,d0_2,d0_3]"); INFER_OK(op, "[1,?,3,4,?,1]", "[d0_0,d0_1,d0_2,d0_3]"); } TEST(LinalgOpsTest, UnchangedSquare_ShapeFn) { for (const char* op_name : {"Cholesky", "CholeskyGrad", "MatrixInverse"}) { ShapeInferenceTestOp op(op_name); const string extra_shape = (op.name == "CholeskyGrad" ? ";?" : ""); INFER_OK(op, "?" + extra_shape, "?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]" + extra_shape); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]" + extra_shape); INFER_OK(op, "[?,?]" + extra_shape, "[d0_0|d0_1,d0_0|d0_1]"); INFER_OK(op, "[1,?]" + extra_shape, "[d0_0,d0_0]"); INFER_OK(op, "[?,1]" + extra_shape, "[d0_1,d0_1]"); INFER_OK(op, "[5,?,7,?,?]" + extra_shape, "[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]"); INFER_OK(op, "[5,?,7,1,?]" + extra_shape, "[d0_0,d0_1,d0_2,d0_3,d0_3]"); INFER_OK(op, "[5,?,7,?,1]" + extra_shape, "[d0_0,d0_1,d0_2,d0_4,d0_4]"); } } TEST(LinalgOpsTest, SelfAdjointEig_ShapeFn) { ShapeInferenceTestOp op("SelfAdjointEig"); INFER_OK(op, "?", "?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]"); INFER_OK(op, "[?,?]", "[?,d0_0|d0_1]"); INFER_OK(op, "[1,?]", "[2,d0_0]"); INFER_OK(op, "[?,1]", "[2,d0_1]"); INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,?,d0_3|d0_4]"); INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,2,d0_3]"); INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,2,d0_4]"); } TEST(LinalgOpsTest, SelfAdjointEigV2_ShapeFn) { ShapeInferenceTestOp op("SelfAdjointEigV2"); auto set_compute_v = [&op](bool compute_v) { TF_ASSERT_OK(NodeDefBuilder("test", "Pack") .Input({{"input", 0, DT_FLOAT}}) .Attr("compute_v", compute_v) .Finalize(&op.node_def)); TF_ASSERT_OK(NodeDefBuilder("test", "Pack") .Input({{"input", 0, DT_HALF}}) .Attr("compute_v", compute_v) .Finalize(&op.node_def)); }; set_compute_v(false); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[3,1,2]"); INFER_OK(op, "?", "?;[0]"); INFER_OK(op, "[?,?]", "[d0_0|d0_1];[0]"); INFER_OK(op, "[1,?]", "[d0_0|d0_1];[0]"); INFER_OK(op, "[?,1]", "[d0_0|d0_1];[0]"); INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]"); INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]"); INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]"); set_compute_v(true); INFER_OK(op, "?", "?;?"); INFER_OK(op, "[?,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]"); INFER_OK(op, "[1,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]"); INFER_OK(op, "[?,1]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]"); INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]"); INFER_OK(op, "[5,?,7,1,?]", 
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]"); INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]"); } TEST(LinalgOpsTest, MatrixSolve_ShapeFn) { ShapeInferenceTestOp op("MatrixSolve"); INFER_OK(op, "?;?", "?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]"); INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op, "[5,?];[6,?,?]"); INFER_OK(op, "[?,?];?", "[d0_0|d0_1,?]"); INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]"); INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]"); INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]"); INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]"); INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]"); INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]"); INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]"); INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]"); INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]"); INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]"); INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]"); } TEST(LinalgOpsTest, MatrixTriangularSolve_ShapeFn) { ShapeInferenceTestOp op("MatrixTriangularSolve"); INFER_OK(op, "?;?", "?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]"); INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]"); INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]"); INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]"); INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]"); INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]"); INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]"); INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]"); INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]"); INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]"); INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]"); INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]"); } TEST(LinalgOpsTest, MatrixSolveLs_ShapeFn) { ShapeInferenceTestOp op("MatrixSolveLs"); INFER_OK(op, "?;?;?", "?"); INFER_OK(op, "?;?;[]", "?"); INFER_OK(op, "[1,?];[1,?];?", "[d0_1,d1_1]"); INFER_OK(op, "[1,2];[1,3];?", "[d0_1,d1_1]"); INFER_ERROR("Dimensions must be equal, but are 5 and 6", op, "[5,?];[6,?];?"); INFER_OK(op, "[10,?,1,?];[?,20,1,?];?", "[d0_0,d1_1,d0_3,d1_3]"); INFER_OK(op, "[10,20,1,2];[10,20,1,3];?", "[d0_0|d1_0,d0_1|d1_1,d0_3,d1_3]"); INFER_ERROR("Dimensions must be equal, but are 5 and 6", op, "[10,?,5,?];[?,20,6,?];?"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 10 and 11", op, "[10,?,5,?];[11,?,5,?];?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?;?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[?];?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]"); } TEST(LinalgOpsTest, Qr_ShapeFn) { ShapeInferenceTestOp op("Qr"); auto set_attrs = [&op](bool full_matrices) { TF_ASSERT_OK(NodeDefBuilder("test", "Qr") .Input({"input", 0, DT_FLOAT}) .Attr("full_matrices", full_matrices) .Finalize(&op.node_def)); TF_ASSERT_OK(NodeDefBuilder("test", "Qr") .Input({"input", 0, DT_HALF}) .Attr("full_matrices", full_matrices) .Finalize(&op.node_def)); }; set_attrs(false); INFER_OK(op, "?", "?;?"); INFER_OK(op, "[?,?,?]", 
"[d0_0,d0_1,?];[d0_0,?,d0_2]"); INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]"); INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]"); INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,?];[d0_0,?,d0_2]"); INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); set_attrs(true); INFER_OK(op, "?", "?;?"); INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); } TEST(LinalgOpsTest, Svd_ShapeFn) { ShapeInferenceTestOp op("Svd"); auto set_attrs = [&op](bool compute_uv, bool full_matrices) { TF_ASSERT_OK(NodeDefBuilder("test", "Svd") .Input({"input", 0, DT_FLOAT}) .Attr("compute_uv", compute_uv) .Attr("full_matrices", full_matrices) .Finalize(&op.node_def)); TF_ASSERT_OK(NodeDefBuilder("test", "Svd") .Input({"input", 0, DT_HALF}) .Attr("compute_uv", compute_uv) .Attr("full_matrices", full_matrices) .Finalize(&op.node_def)); }; set_attrs(false, false); INFER_OK(op, "?", "?;[0];[0]"); INFER_OK(op, "[?,?,?]", "[d0_0,?];[0];[0]"); INFER_OK(op, "[4,?,?]", "[d0_0,?];[0];[0]"); INFER_OK(op, "[4,2,?]", "[d0_0,?];[0];[0]"); INFER_OK(op, "[4,?,2]", "[d0_0,?];[0];[0]"); INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[0];[0]"); INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[0];[0]"); INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[0];[0]"); INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[0];[0]"); INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[0];[0]"); INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[0];[0]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); set_attrs(true, false); INFER_OK(op, "?", "?;?;?"); INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]"); INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]"); INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]"); INFER_OK(op, "[4,?,2]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]"); INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]"); INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]"); INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]"); INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); set_attrs(true, true); INFER_OK(op, "?", "?;?;?"); INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, 
"[4,?,2]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); } TEST(LinalgOpsTest, Lu_ShapeFn) { ShapeInferenceTestOp op("Lu"); INFER_OK(op, "?", "?;?"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?,3,4,1,2]"); INFER_OK(op, "[?,?]", "[d0_0,d0_0];[d0_0]"); INFER_OK(op, "[1,?]", "[d0_0,d0_0];[d0_0]"); INFER_OK(op, "[?,1]", "[d0_1,d0_1];[d0_1]"); INFER_OK(op, "[1,?,3,4,?,?]", "[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]"); INFER_OK(op, "[1,?,3,4,1,?]", "[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]"); INFER_OK(op, "[1,?,3,4,?,1]", "[d0_0,d0_1,d0_2,d0_3,d0_5,d0_5];[d0_0,d0_1,d0_2,d0_3,d0_5]"); } TEST(LinalgOpsTest, TridiagonalMatMul_ShapeFn) { ShapeInferenceTestOp op("TridiagonalMatMul"); INFER_OK(op, "?;?;?;?", "in3"); INFER_OK(op, "[1,5];[1,5];[1,5];[?,1]", "in3"); INFER_OK(op, "[1,5];[1,5];[1,5];[5,1]", "in3"); INFER_OK(op, "[?,1,?];[?,1,?];[?,1,?];[?,?,?]", "in3"); INFER_OK(op, "[?,1,5];[?,1,5];[?,1,5];[7,5,2]", "in3"); INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[?,5,2]", "in3"); INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[7,5,2]", "in3"); INFER_OK(op, "[7,?,1,5];[7,?,1,5];[7,?,1,5];[7,8,5,2]", "in3"); INFER_OK(op, "[7,8,1,5];[7,8,1,5];[7,8,1,5];[7,8,5,2]", "in3"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3];[3];[3];[5,1]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3,5];[3,5];[3,5];[5]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. " "Shapes are [6,4] and [6,8].", op, "[6,4,3,5];[6,4,3,5];[6,4,3,5];[6,8,5,2]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. " "Shapes are [?,4] and [6,8].", op, "[?,4,3,5];[?,4,3,5];[?,4,3,5];[6,8,5,2]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 5 and 6. " "Shapes are [1,5] and [1,6]", op, "[1,5];[1,6];[1,5];[6,2]"); INFER_ERROR("Dimension must be 1 but is 3", op, "[3,5];[3,5];[3,5];[5,2]"); } TEST(LinalgOpsTest, TridiagonalSolve_ShapeFn) { ShapeInferenceTestOp op("TridiagonalSolve"); INFER_OK(op, "?;?", "in1"); INFER_OK(op, "[3,5];[?,1]", "in1"); INFER_OK(op, "[?,5];[5,1]", "in1"); INFER_OK(op, "[?,5];[?,?]", "in1"); INFER_OK(op, "[?,?];[?,?]", "in1"); INFER_OK(op, "[3,5];[5,1]", "in1"); INFER_OK(op, "[3,5];[5,2]", "in1"); INFER_OK(op, "[?,?,?];[?,?,?]", "in1"); INFER_OK(op, "[?,3,5];[7,5,2]", "in1"); INFER_OK(op, "[7,3,5];[?,5,2]", "in1"); INFER_OK(op, "[7,?,5];[?,5,?]", "in1"); INFER_OK(op, "[7,3,5];[7,5,2]", "in1"); INFER_OK(op, "[7,?,3,5];[7,8,5,2]", "in1"); INFER_OK(op, "[7,8,3,5];[7,8,5,2]", "in1"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3];[5,1]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3,5];[5]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. " "Shapes are [6,4] and [6,8].", op, "[6,4,3,5];[6,8,5,2]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. 
" "Shapes are [?,4] and [6,8].", op, "[?,4,3,5];[6,8,5,2]"); INFER_ERROR("Dimension must be 3 but is 4", op, "[4,5];[5,2]"); INFER_ERROR("Dimension must be 3 but is 4", op, "[6,4,5];[6,5,2]"); INFER_ERROR("Dimensions must be equal, but are 9 and 5", op, "[3,9];[5,2]"); INFER_ERROR("Dimensions must be equal, but are 9 and 5", op, "[6,3,9];[6,5,2]"); } }
Status TridiagonalMatMulShapeFn(InferenceContext* c) { ShapeHandle superdiag; ShapeHandle maindiag; ShapeHandle subdiag; ShapeHandle rhs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &superdiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &maindiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 2, &subdiag)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(3), 2, &rhs)); ShapeHandle superdiag_batch_shape; ShapeHandle maindiag_batch_shape; ShapeHandle subdiag_batch_shape; ShapeHandle rhs_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(superdiag, 0, -2, &superdiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(maindiag, 0, -2, &maindiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(subdiag, 0, -2, &subdiag_batch_shape)); TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape)); TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &superdiag)); TF_RETURN_IF_ERROR( c->Merge(maindiag_batch_shape, rhs_batch_shape, &rhs_batch_shape)); TF_RETURN_IF_ERROR( c->Merge(subdiag_batch_shape, rhs_batch_shape, &rhs_batch_shape)); TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &maindiag)); TF_RETURN_IF_ERROR(c->Merge(subdiag, maindiag, &maindiag)); DimensionHandle m_lhs = c->Dim(maindiag, -1); DimensionHandle m_rhs = c->Dim(rhs, -2); TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(maindiag, -2), 1, &unused)); c->set_output(0, rhs); return absl::OkStatus(); }
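// Net effect of the shape function above: every diagonal must have shape
// [..., 1, M] (the WithValue call pins the second-to-last dimension to 1),
// the three diagonals and the rhs must agree on their batch dimensions, the
// rhs must be [..., M, N] (m_lhs merged with m_rhs), and the output takes the
// rhs shape unchanged -- which is why the tests below all expect "in3".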
TEST(LinalgOpsTest, TridiagonalMatMul_ShapeFn) { ShapeInferenceTestOp op("TridiagonalMatMul"); INFER_OK(op, "?;?;?;?", "in3"); INFER_OK(op, "[1,5];[1,5];[1,5];[?,1]", "in3"); INFER_OK(op, "[1,5];[1,5];[1,5];[5,1]", "in3"); INFER_OK(op, "[?,1,?];[?,1,?];[?,1,?];[?,?,?]", "in3"); INFER_OK(op, "[?,1,5];[?,1,5];[?,1,5];[7,5,2]", "in3"); INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[?,5,2]", "in3"); INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[7,5,2]", "in3"); INFER_OK(op, "[7,?,1,5];[7,?,1,5];[7,?,1,5];[7,8,5,2]", "in3"); INFER_OK(op, "[7,8,1,5];[7,8,1,5];[7,8,1,5];[7,8,5,2]", "in3"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3];[3];[3];[5,1]"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3,5];[3,5];[3,5];[5]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. " "Shapes are [6,4] and [6,8].", op, "[6,4,3,5];[6,4,3,5];[6,4,3,5];[6,8,5,2]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 4 and 8. " "Shapes are [?,4] and [6,8].", op, "[?,4,3,5];[?,4,3,5];[?,4,3,5];[6,8,5,2]"); INFER_ERROR( "Dimension 1 in both shapes must be equal, but are 5 and 6. " "Shapes are [1,5] and [1,6]", op, "[1,5];[1,6];[1,5];[6,2]"); INFER_ERROR("Dimension must be 1 but is 3", op, "[3,5];[3,5];[3,5];[5,2]"); }
#include "xla/service/tuple_points_to_analysis.h" #include <memory> #include <ostream> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/map_util.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/shape_util.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { std::string BufferAlias::ToString() const { return absl::StrCat("BufferAlias(", instruction_->name(), "[", absl::StrJoin(index_, ","), "])"); } std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) { out << buffer_alias.ToString(); return out; } bool PointsToSet::IsAmbiguous() const { bool ambiguous = false; ForEachElement( [&ambiguous](const ShapeIndex& , const BufferList& points_to) { ambiguous |= points_to.size() > 1; }); return ambiguous; } bool PointsToSet::IsDistinct() const { bool distinct = true; absl::flat_hash_set<const LogicalBuffer*> all_points_to; ForEachElement([&](const ShapeIndex& , const BufferList& points_to) { for (auto& buffer : points_to) { if (all_points_to.contains(buffer)) { distinct = false; } all_points_to.insert(buffer); } }); return distinct; } size_t PointsToSet::size() const { return CreateFlattenedSet().size(); } PointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const { BufferSet flat_set; ForEachElement( [&flat_set](const ShapeIndex& , const BufferList& buffers) { flat_set.insert(buffers.begin(), buffers.end()); }); return flat_set; } bool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const { bool found = false; ForEachElement([&found, &buffer](const ShapeIndex& , const BufferList& pointed_to_buffers) { if (!found && absl::c_linear_search(pointed_to_buffers, &buffer)) { found = true; } }); return found; } bool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer, const ShapeIndex& index) const { const auto& pointed_to_buffers = element(index); return absl::c_linear_search(pointed_to_buffers, &buffer); } void PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer, const ShapeIndex& index) { if (ContainsBufferAtIndex(buffer, index)) { return; } mutable_element(index)->push_back(&buffer); } const PointsToSet::SourceSet& PointsToSet::tuple_sources( const ShapeIndex& index) const { return tree_.element(index).tuple_sources; } void PointsToSet::add_tuple_source(const ShapeIndex& index, HloInstruction* tuple) { tree_.mutable_element(index)->tuple_sources.insert(tuple); } namespace { void GatherFusionInstructions( HloInstruction* instruction, std::vector<HloInstruction*>* fusion_instructions) { CHECK_EQ(HloOpcode::kFusion, instruction->opcode()); for (auto* fused : instruction->fused_instructions()) { if (fused->opcode() == HloOpcode::kFusion) { GatherFusionInstructions(fused, fusion_instructions); } } fusion_instructions->push_back(instruction); } } absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>> TuplePointsToAnalysis::Run(const HloModule* module) { auto logical_buffer_analysis = LogicalBufferAnalysis::Run(module); std::unique_ptr<TuplePointsToAnalysis> analysis(new TuplePointsToAnalysis( module, std::move(logical_buffer_analysis).value())); TF_RETURN_IF_ERROR(analysis->Analyze()); return std::move(analysis); } absl::Status 
TuplePointsToAnalysis::Analyze() { per_instruction_.clear(); per_instruction_.reserve(module_->instruction_count()); logical_buffer_aliases_.clear(); logical_buffer_aliases_.resize( logical_buffer_analysis_->num_logical_buffers()); std::vector<HloInstruction*> fusion_instructions; for (auto* computation : module_->MakeNonfusionComputations()) { TF_RETURN_IF_ERROR(computation->Accept(this)); TF_RETURN_IF_ERROR( PopulateDefinedBuffersAndAliases(computation->instructions())); for (auto* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kFusion) { GatherFusionInstructions(instruction, &fusion_instructions); } } } for (auto* instruction : fusion_instructions) { TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this)); TF_RETURN_IF_ERROR( PopulateDefinedBuffersAndAliases(instruction->fused_instructions())); } XLA_VLOG_LINES(3, ToString()); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases( const decltype(std::declval<HloComputation>() .instructions())& instructions) { for (auto* instruction : instructions) { PerInstruction* pi = PerInst(instruction); TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction( instruction, &pi->instruction_defined_buffers)); const PointsToSet& points_to_set = GetPointsToSet(instruction); points_to_set.ForEachElement( [this, &instruction]( const ShapeIndex& index, const PointsToSet::BufferList& pointed_to_buffers) { for (const LogicalBuffer* buffer : pointed_to_buffers) { logical_buffer_aliases_[buffer->id()].emplace_back(instruction, index); } }); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::DefaultAction( HloInstruction* hlo_instruction) { PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction); points_to_set.ForEachMutableElement( [this, hlo_instruction](const ShapeIndex& index, PointsToSet::BufferList* buffers) { buffers->push_back( &logical_buffer_analysis_->GetBuffer(hlo_instruction, index)); }); if (hlo_instruction->shape().IsTuple()) { points_to_set.add_tuple_source({}, hlo_instruction); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleGetTupleElement( HloInstruction* get_tuple_element) { int64_t element_index = get_tuple_element->tuple_index(); PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element); const PointsToSet& operand_points_to_set = *PerInst(get_tuple_element->operand(0))->points_to_set; points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) { ShapeIndex src_index; src_index.push_back(element_index); for (auto element : target_index) { src_index.push_back(element); } *points_to = operand_points_to_set.element(src_index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) { PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0)); points_to_set.mutable_element({})->clear(); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(copy, {}), {}); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) { CreateCopiedPointsToSet(bitcast, bitcast->operand(0)); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) { CreateCopiedPointsToSet(domain, domain->operand(0)); return absl::OkStatus(); } absl::Status 
TuplePointsToAnalysis::HandleAddDependency( HloInstruction* add_dependency) { CreateCopiedPointsToSet(add_dependency, add_dependency->operand(0)); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {}), {}); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {1}), {1}); const PointsToSet& operand_points_to_set = GetPointsToSet(recv_done->operand(0)); points_to_set.ForEachMutableElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& index, PointsToSet::BufferList* buffers) { if (index.empty() || index[0] != 0) { return; } *buffers = operand_points_to_set.element(index); for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) { points_to_set.add_tuple_source(index, tuple_source); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncStart( HloInstruction* async_start) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start); points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) { if (target_index.size() >= 2 && target_index.front() == 0) { const PointsToSet& operand_points_to_set = GetPointsToSet(async_start->operand(target_index[1])); ShapeIndex source_index(target_index.begin() + 2, target_index.end()); *buffers = operand_points_to_set.element(source_index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(source_index)) { points_to_set.add_tuple_source(target_index, tuple); } } else { buffers->push_back( &logical_buffer_analysis_->GetBuffer(async_start, target_index)); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncUpdate( HloInstruction* async_update) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update); const PointsToSet& operand_points_to_set = GetPointsToSet(async_update->operand(0)); CHECK_EQ(async_update->shape(), async_update->operand(0)->shape()); points_to_set.ForEachMutableElement([&](const ShapeIndex& index, PointsToSet::BufferList* buffers) { *buffers = operand_points_to_set.element(index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) { points_to_set.add_tuple_source(index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncDone( HloInstruction* async_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done); const PointsToSet& operand_points_to_set = GetPointsToSet(async_done->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { if (!src_index.empty() && src_index.front() == 1) { const ShapeIndex target_index(src_index.begin() + 1, src_index.end()); *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopyStart( HloInstruction* copy_start) { PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start); const PointsToSet& operand_points_to_set = GetPointsToSet(copy_start->operand(0)); points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) { if (target_index == ShapeIndex({1})) { *buffers = 
operand_points_to_set.element({}); } else { buffers->push_back( &logical_buffer_analysis_->GetBuffer(copy_start, target_index)); } }); for (HloInstruction* tuple : operand_points_to_set.tuple_sources({})) { points_to_set.add_tuple_source({1}, tuple); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done); const PointsToSet& operand_points_to_set = GetPointsToSet(copy_done->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { if (src_index == ShapeIndex({0})) { const ShapeIndex target_index = {}; *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) { PointsToSet& points_to_set = CreateEmptyPointsToSet(send); auto top_buffer = points_to_set.mutable_element(ShapeIndex({})); top_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({}))); points_to_set.add_tuple_source({}, send); auto context_buffer = points_to_set.mutable_element(ShapeIndex({1})); context_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1}))); auto token_buffer = points_to_set.mutable_element(ShapeIndex({2})); token_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2}))); const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { ShapeIndex target_index({0}); for (auto element : src_index) { target_index.push_back(element); } *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) { absl::Span<HloInstruction* const> operands(tuple->operands()); PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(tuple, {}), {}); for (int64_t i = 0; i < operands.size(); ++i) { const PointsToSet& operand_points_to_set = *PerInst(operands[i])->points_to_set; operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set, i]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { ShapeIndex target_index; target_index.push_back(i); for (auto element : src_index) { target_index.push_back(element); } *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); } points_to_set.add_tuple_source({}, tuple); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCustomCall( HloInstruction* custom_call) { auto ccall = Cast<HloCustomCallInstruction>(custom_call); PointsToSet& points_to_set = CreateEmptyPointsToSet(custom_call); absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>> aliased_outputs; for (const auto& pair : ccall->output_to_operand_aliasing()) { aliased_outputs.emplace(pair.first, pair.second); } points_to_set.ForEachMutableElement([&](const 
ShapeIndex& index, PointsToSet::BufferList* buffers) { auto it = aliased_outputs.find(index); if (it == aliased_outputs.end() || !alias_buffer_across_dataflow_) { points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(custom_call, index), index); } else { const PointsToSet& input_set = *PerInst(ccall->operand(it->second.first))->points_to_set; for (const LogicalBuffer* input_buffer : input_set.element(it->second.second)) { points_to_set.AddPointedToBuffer(*input_buffer, index); } for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) { points_to_set.add_tuple_source(index, tuple); } } }); points_to_set.add_tuple_source({}, custom_call); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleFusion(HloInstruction* fusion) { auto cfusion = Cast<HloFusionInstruction>(fusion); PointsToSet& points_to_set = CreateEmptyPointsToSet(fusion); absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>> aliased_outputs; for (const auto& pair : cfusion->output_to_operand_aliasing()) { aliased_outputs.emplace(pair.first, pair.second); } points_to_set.ForEachMutableElement([&](const ShapeIndex& index, PointsToSet::BufferList* buffers) { auto it = aliased_outputs.find(index); if (it == aliased_outputs.end()) { points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(fusion, index), index); } else { const PointsToSet& input_set = *PerInst(cfusion->operand(it->second.first))->points_to_set; for (const LogicalBuffer* input_buffer : input_set.element(it->second.second)) { points_to_set.AddPointedToBuffer(*input_buffer, index); } for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) { points_to_set.add_tuple_source(index, tuple); } } }); points_to_set.add_tuple_source({}, fusion); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleOptimizationBarrier( HloInstruction* barrier) { CreateCopiedPointsToSet(barrier, barrier->operand(0)); return absl::OkStatus(); } const PointsToSet& TuplePointsToAnalysis::GetPointsToSet( const HloInstruction* hlo_instruction) const { return *PerInst(hlo_instruction)->points_to_set; } PointsToSet& TuplePointsToAnalysis::CreateEmptyPointsToSet( const HloInstruction* instruction) { PerInstruction* pi = PerInst(instruction); CHECK(pi->points_to_set == nullptr) << "instruction should not have been present in the map."; auto set = std::make_unique<PointsToSet>(&instruction->shape()); pi->points_to_set = std::move(set); return *pi->points_to_set; } bool TuplePointsToAnalysis::InstructionDefinesBufferAtIndex( const HloInstruction* instruction, const ShapeIndex& index) const { const auto& buffers = GetPointsToSet(instruction).element(index); return (buffers.size() == 1 && buffers[0]->instruction() == instruction); } absl::Status TuplePointsToAnalysis::VerifyBuffer( const LogicalBuffer& buffer) const { if (!InstructionDefinesBufferAtIndex(buffer.instruction(), buffer.index())) { return FailedPrecondition( "LogicalBuffer %s is ill-defined: instruction %s does not define a " "buffer at that index", buffer.ToString(), buffer.instruction()->name()); } if (buffer.id() < 0 || buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) { return FailedPrecondition("LogicalBuffer %s is ill-defined: invalid id %d", buffer.ToString(), buffer.id()); } if (GetBuffer(buffer.id()).instruction() != buffer.instruction() || GetBuffer(buffer.id()).index() != buffer.index()) { return FailedPrecondition( "LogicalBuffer %s is ill-defined: buffer with same id differs: %s", buffer.ToString(), 
GetBuffer(buffer.id()).ToString()); } return absl::OkStatus(); } const LogicalBuffer& TuplePointsToAnalysis::GetBuffer( LogicalBuffer::Id id) const { CHECK_GE(id, 0); CHECK_LT(id, logical_buffer_analysis_->num_logical_buffers()); return logical_buffer_analysis_->GetBuffer(id); } absl::StatusOr<const LogicalBuffer*> TuplePointsToAnalysis::GetBufferDefinedAt( const HloInstruction* instruction, const ShapeIndex& index) const { const auto& buffers = GetPointsToSet(instruction).element(index); if (buffers.size() != 1 || buffers[0]->instruction() != instruction) { return FailedPrecondition( "instruction %s does not define buffer at index {%s}", instruction->name(), absl::StrJoin(index, ",")); } return buffers[0]; } const TuplePointsToAnalysis::BufferAliasVector& TuplePointsToAnalysis::GetBufferAliases(const LogicalBuffer& buffer) const { return logical_buffer_aliases_[buffer.id()]; } const TuplePointsToAnalysis::BufferDefinitionVector& TuplePointsToAnalysis::GetBuffersDefinedByInstruction( const HloInstruction* instruction) const { return PerInst(instruction)->instruction_defined_buffers; } absl::Status TuplePointsToAnalysis::GatherBuffersDefinedByInstruction( const HloInstruction* instruction, TuplePointsToAnalysis::BufferDefinitionVector* buffers) { GetPointsToSet(instruction) .ForEachElement([buffers, instruction]( const ShapeIndex& index, const PointsToSet::BufferList& source_buffers) { CHECK(!source_buffers.empty()); if (source_buffers.size() == 1 && source_buffers[0]->instruction() == instruction) { DCHECK(source_buffers[0]->index() == index); buffers->push_back(source_buffers[0]); } else { for (const LogicalBuffer* source_buffer : source_buffers) { DCHECK(source_buffer->instruction() != instruction); } } }); return absl::OkStatus(); } PointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet( const HloInstruction* instruction, const HloInstruction* src) { PointsToSet& dst_points_to_set = CreateEmptyPointsToSet(instruction); const PointsToSet& src_points_to_set = GetPointsToSet(src); dst_points_to_set.ForEachMutableElement( [&dst_points_to_set, &src_points_to_set]( const ShapeIndex& index, PointsToSet::BufferList* buffers) { *buffers = src_points_to_set.element(index); for (auto& tuple_source : src_points_to_set.tuple_sources(index)) { dst_points_to_set.add_tuple_source(index, tuple_source); } }); return *PerInst(instruction)->points_to_set; } std::string TuplePointsToAnalysis::ToString() const { std::string output = absl::StrFormat("TuplePointsToSet for module %s:\n", module_->name()); for (const auto* computation : module_->MakeNonfusionComputations()) { const char* entry = computation == module_->entry_computation() ? "entry " : ""; absl::StrAppend(&output, entry, "computation ", computation->name(), ":\n"); for (const HloInstruction* instruction : computation->MakeInstructionPostOrder()) { InstructionToString(instruction, &output); if (instruction->opcode() == HloOpcode::kFusion) { for (auto* fused : instruction->fused_instructions()) { InstructionToString(fused, &output); } } } } absl::StrAppend(&output, "LogicalBuffers:\n"); for (const auto& b : logical_buffer_analysis_->logical_buffers()) { absl::StrAppend(&output, " buffer ", b->ToString(), ":\n"); for (const BufferAlias& alias : logical_buffer_aliases_[b->id()]) { absl::StrAppend(&output, " alias ", alias.ToString(), "\n"); } } return output; } void TuplePointsToAnalysis::InstructionToString( const HloInstruction* instruction, std::string* output) const { const std::string prefix = instruction->IsFused() ? 
" " : ""; absl::StrAppend(output, prefix, " instruction ", instruction->ToShortString(), ":\n"); const PointsToSet& points_to_set = GetPointsToSet(instruction); points_to_set.ForEachElement( [&prefix, &output](const ShapeIndex& index, const PointsToSet::BufferList& points_to) { absl::StrAppend( output, prefix, " {", absl::StrJoin(index, ","), "}: ", absl::StrJoin(points_to, ", ", [](std::string* out, const LogicalBuffer* source) { out->append(source->ToString()); }), "\n"); }); } bool TuplePointsToAnalysis::DoesNotUseOperandBuffer( const HloInstruction* operand, const ShapeIndex& index, const HloInstruction* user) const { CHECK(user->IsUserOf(operand)) << "user: " << user->ToString() << " operand: " << operand->ToString(); if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) { return true; } else if (user->IsLoopFusion()) { auto it = absl::c_find_if( user->fused_parameters(), [&](HloInstruction* fused_param) { return user->operand(fused_param->parameter_number()) == operand; }); CHECK(it != user->fused_parameters().end()); const LogicalBuffer* buffer = GetBufferDefinedAt(*it, index).value(); for (const BufferAlias& alias : GetBufferAliases(*buffer)) { for (HloInstruction* alias_user : alias.instruction()->users()) { if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(), alias_user)) { continue; } return false; } } return true; } return false; } std::vector<std::pair<HloInstruction*, int64_t>> TuplePointsToAnalysis::GetAllUsesOfInstructionAtIndex( HloInstruction* instruction, const ShapeIndex& index) const { std::vector<std::pair<HloInstruction*, int64_t>> uses; const PointsToSet::BufferList& points_to = GetPointsToSet(instruction).element(index); for (const LogicalBuffer* buffer : points_to) { for (const BufferAlias& alias : GetBufferAliases(*buffer)) { for (HloInstruction* alias_user : alias.instruction()->users()) { if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(), alias_user)) { continue; } for (int64_t op_idx : alias_user->OperandIndices(alias.instruction())) { uses.emplace_back(alias_user, op_idx); } } } } return uses; } bool TuplePointsToAnalysis::HasUniqueFusedUseOfOperandAt( HloInstruction* operand, const ShapeIndex& operand_index, HloInstruction* fusion, const int64_t use_operand_index) const { CHECK_EQ(HloOpcode::kFusion, fusion->opcode()); if (fusion->OperandIndices(operand).size() > 1) { return false; } const auto& fused_params = fusion->fused_parameters(); auto fused_param_it = absl::c_find_if(fused_params, [&](HloInstruction* fused_param) { return fusion->operand(fused_param->parameter_number()) == operand; }); if (fused_param_it == fused_params.end()) { return false; } auto* fused_param = *fused_param_it; auto
#include "xla/service/tuple_points_to_analysis.h" #include <map> #include <memory> #include <string> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/literal_util.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/instruction_fusion.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/logging.h" #include "tsl/platform/test.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { using ::testing::UnorderedElementsAre; using ::testing::UnorderedElementsAreArray; class TuplePointsToAnalysisTest : public HloTestBase { protected: void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) { BuildModule(std::move(computation)); RunAnalysis(); } void BuildModule(std::unique_ptr<HloComputation> computation) { module_ = CreateNewVerifiedModule(); module_->AddEntryComputation(std::move(computation)); } void RunAnalysis() { CHECK_NOTNULL(module_.get()); points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value(); } const LogicalBuffer* const GetBuffer(const HloInstruction* instruction, const ShapeIndex& index) { const auto& pointed_to = points_to_analysis_->GetPointsToSet(instruction).element(index); CHECK_EQ(1, pointed_to.size()); CHECK_EQ(instruction, pointed_to[0]->instruction()); CHECK(index == pointed_to[0]->index()); return pointed_to[0]; } void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set, absl::Span<const LogicalBuffer* const> buffers) { std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end()); EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec)); } void ExpectHasTopLevelBuffers( const PointsToSet::BufferList& points_to_set, absl::Span<HloInstruction* const> instructions) { PointsToSet::BufferList buffers; for (auto instruction : instructions) { buffers.push_back(GetBuffer(instruction, {})); } ExpectHasBuffers(points_to_set, buffers); } void ExpectHasTopLevelBuffers( const PointsToSet::BufferSet& points_to_set, absl::Span<HloInstruction* const> instructions) { ExpectHasTopLevelBuffers( PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()), instructions); } void ExpectHasBufferAliases( const HloInstruction* instruction, const ShapeIndex& index, absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) { const LogicalBuffer* buffer = points_to_analysis_->GetBufferDefinedAt(instruction, index).value(); std::vector<BufferAlias> expected_aliases; expected_aliases.reserve(expected.size()); for (auto& pair : expected) { expected_aliases.push_back(BufferAlias(pair.first, pair.second)); } EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer), UnorderedElementsAreArray(expected_aliases)); } std::unique_ptr<HloModule> module_; std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_; }; } }
absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {}), {}); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {1}), {1}); const PointsToSet& operand_points_to_set = GetPointsToSet(recv_done->operand(0)); points_to_set.ForEachMutableElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& index, PointsToSet::BufferList* buffers) { if (index.empty() || index[0] != 0) { return; } *buffers = operand_points_to_set.element(index); for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) { points_to_set.add_tuple_source(index, tuple_source); } }); return absl::OkStatus(); }
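// Context for the handler above: RecvDone produces a (data, token) tuple. It
// defines fresh buffers for the tuple itself ({}) and for the token ({1}),
// while the data subtree (indices beginning with 0) aliases the matching
// buffers of the Recv operand -- hence the early return for every index that
// does not start with 0.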
#include "tsl/profiler/lib/profiler_factory.h" #include <memory> #include <utility> #include <vector> #include "tsl/platform/mutex.h" #include "tsl/profiler/lib/profiler_controller.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" namespace tsl { namespace profiler { namespace { mutex mu(LINKER_INITIALIZED); std::vector<ProfilerFactory>* GetFactories() { static auto factories = new std::vector<ProfilerFactory>(); return factories; } } void RegisterProfilerFactory(ProfilerFactory factory) { mutex_lock lock(mu); GetFactories()->push_back(std::move(factory)); } std::vector<std::unique_ptr<profiler::ProfilerInterface>> CreateProfilers( const tensorflow::ProfileOptions& options) { std::vector<std::unique_ptr<profiler::ProfilerInterface>> result; mutex_lock lock(mu); for (const auto& factory : *GetFactories()) { auto profiler = factory(options); if (profiler == nullptr) continue; result.emplace_back( std::make_unique<ProfilerController>(std::move(profiler))); } return result; } void ClearRegisteredProfilersForTest() { mutex_lock lock(mu); GetFactories()->clear(); } } }
#include "tsl/profiler/lib/profiler_factory.h" #include <functional> #include <utility> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tsl/platform/macros.h" #include "tsl/platform/test.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" #include "tsl/profiler/protobuf/xplane.pb.h" namespace tsl { namespace profiler { namespace { class TestProfiler : public ProfilerInterface { public: absl::Status Start() override { return absl::OkStatus(); } absl::Status Stop() override { return absl::OkStatus(); } absl::Status CollectData(tensorflow::profiler::XSpace*) override { return absl::OkStatus(); } }; std::unique_ptr<ProfilerInterface> TestFactoryFunction( const tensorflow::ProfileOptions& options) { return absl::make_unique<TestProfiler>(); } TEST(ProfilerFactoryTest, FactoryFunctionPointer) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&TestFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } TEST(ProfilerFactoryTest, FactoryLambda) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) { return absl::make_unique<TestProfiler>(); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } std::unique_ptr<ProfilerInterface> NullFactoryFunction( const tensorflow::ProfileOptions& options) { return nullptr; } TEST(ProfilerFactoryTest, FactoryReturnsNull) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&NullFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_TRUE(profilers.empty()); } class FactoryClass { public: explicit FactoryClass(void* ptr) : ptr_(ptr) {} FactoryClass(const FactoryClass&) = default; FactoryClass(FactoryClass&&) = default; std::unique_ptr<ProfilerInterface> CreateProfiler( const tensorflow::ProfileOptions& options) const { return absl::make_unique<TestProfiler>(); } private: void* ptr_ TF_ATTRIBUTE_UNUSED = nullptr; }; TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) { ClearRegisteredProfilersForTest(); static int token = 42; FactoryClass factory(&token); RegisterProfilerFactory([factory = std::move(factory)]( const tensorflow::ProfileOptions& options) { return factory.CreateProfiler(options); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } } } }
void ClearRegisteredProfilersForTest() { mutex_lock lock(mu); GetFactories()->clear(); }
TEST(ProfilerFactoryTest, FactoryFunctionPointer) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&TestFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } TEST(ProfilerFactoryTest, FactoryLambda) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) { return absl::make_unique<TestProfiler>(); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } TEST(ProfilerFactoryTest, FactoryReturnsNull) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&NullFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_TRUE(profilers.empty()); } TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) { ClearRegisteredProfilersForTest(); static int token = 42; FactoryClass factory(&token); RegisterProfilerFactory([factory = std::move(factory)]( const tensorflow::ProfileOptions& options) { return factory.CreateProfiler(options); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); }
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h" #include <string> #include <string_view> #include "absl/strings/ascii.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "tensorstore/index.h" #include "tensorstore/index_interval.h" #include "tensorstore/internal/lexicographical_grid_index_key.h" #include "tensorstore/util/span.h" namespace tensorstore { namespace internal { std::string Base10LexicographicalGridIndexKeyParser::FormatKey( span<const Index> grid_indices) const { if (rank == 0) return "0"; std::string key; FormatGridIndexKeyWithDimensionSeparator( key, dimension_separator, [](std::string& out, DimensionIndex dim, Index grid_index) { absl::StrAppend(&out, grid_index); }, rank, grid_indices); return key; } bool Base10LexicographicalGridIndexKeyParser::ParseKey( std::string_view key, span<Index> grid_indices) const { return ParseGridIndexKeyWithDimensionSeparator( dimension_separator, [](std::string_view part, DimensionIndex dim, Index& grid_index) { if (part.empty() || !absl::ascii_isdigit(part.front()) || !absl::ascii_isdigit(part.back()) || !absl::SimpleAtoi(part, &grid_index)) { return false; } return true; }, key, grid_indices); } Index Base10LexicographicalGridIndexKeyParser:: MinGridIndexForLexicographicalOrder(DimensionIndex dim, IndexInterval grid_interval) const { return MinValueWithMaxBase10Digits(grid_interval.exclusive_max()); } Index MinValueWithMaxBase10Digits(Index exclusive_max) { if (exclusive_max <= 10) { return 0; } Index min_value = 10; while (min_value * 10 < exclusive_max) { min_value *= 10; } return min_value; } } }
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/index.h" #include "tensorstore/index_interval.h" namespace { using ::tensorstore::Index; using ::tensorstore::IndexInterval; using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser; using ::tensorstore::internal::MinValueWithMaxBase10Digits; TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank0) { Base10LexicographicalGridIndexKeyParser parser(0, '/'); EXPECT_THAT(parser.FormatKey({}), "0"); } TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank1) { Base10LexicographicalGridIndexKeyParser parser(1, '/'); EXPECT_THAT(parser.FormatKey({{2}}), "2"); EXPECT_THAT(parser.FormatKey({}), ""); } TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank2) { Base10LexicographicalGridIndexKeyParser parser(2, '/'); EXPECT_THAT(parser.FormatKey({{2, 3}}), "2/3"); EXPECT_THAT(parser.FormatKey({{2}}), "2/"); EXPECT_THAT(parser.FormatKey({}), ""); } TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank1) { Base10LexicographicalGridIndexKeyParser parser(1, '/'); Index indices[1]; EXPECT_TRUE(parser.ParseKey("2", indices)); EXPECT_THAT(indices, ::testing::ElementsAre(2)); EXPECT_FALSE(parser.ParseKey("", indices)); EXPECT_FALSE(parser.ParseKey("-1", indices)); EXPECT_FALSE(parser.ParseKey("a", indices)); EXPECT_FALSE(parser.ParseKey("2/3", indices)); EXPECT_FALSE(parser.ParseKey("2/", indices)); } TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank2) { Base10LexicographicalGridIndexKeyParser parser(2, '/'); Index indices[2]; EXPECT_TRUE(parser.ParseKey("2/3", indices)); EXPECT_THAT(indices, ::testing::ElementsAre(2, 3)); EXPECT_TRUE(parser.ParseKey("212/335", indices)); EXPECT_THAT(indices, ::testing::ElementsAre(212, 335)); EXPECT_FALSE(parser.ParseKey("1", indices)); EXPECT_FALSE(parser.ParseKey("", indices)); EXPECT_FALSE(parser.ParseKey("1/2/3", indices)); EXPECT_FALSE(parser.ParseKey("1/2/", indices)); } TEST(Base10LexicographicalGridIndexKeyParserTest, MinGridIndexForLexicographicalOrder) { Base10LexicographicalGridIndexKeyParser parser(2, '/'); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 1)), 0); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 9)), 0); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 10)), 0); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 11)), 10); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 100)), 10); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 101)), 100); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 999)), 100); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 1000)), 100); EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder( 0, IndexInterval::UncheckedHalfOpen(0, 1001)), 1000); } TEST(MinValueWithMaxBase10DigitsTest, Basic) { EXPECT_EQ(0, MinValueWithMaxBase10Digits(0)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(1)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(9)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(10)); EXPECT_EQ(10, MinValueWithMaxBase10Digits(11)); EXPECT_EQ(10, MinValueWithMaxBase10Digits(100)); EXPECT_EQ(100, MinValueWithMaxBase10Digits(101)); EXPECT_EQ(100, 
MinValueWithMaxBase10Digits(999)); EXPECT_EQ(100, MinValueWithMaxBase10Digits(1000)); EXPECT_EQ(1000, MinValueWithMaxBase10Digits(1001)); } }
Index MinValueWithMaxBase10Digits(Index exclusive_max) { if (exclusive_max <= 10) { return 0; } Index min_value = 10; while (min_value * 10 < exclusive_max) { min_value *= 10; } return min_value; }
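// Worked example for the function above: with exclusive_max = 1001 the
// largest index is 1000 (four digits), so the smallest index with that digit
// count is 1000; with exclusive_max = 1000 the largest index is 999 (three
// digits), giving 100; anything up to 10 yields 0, since single-digit keys
// already sort lexicographically. The test below checks exactly these
// boundaries.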
TEST(MinValueWithMaxBase10DigitsTest, Basic) { EXPECT_EQ(0, MinValueWithMaxBase10Digits(0)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(1)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(9)); EXPECT_EQ(0, MinValueWithMaxBase10Digits(10)); EXPECT_EQ(10, MinValueWithMaxBase10Digits(11)); EXPECT_EQ(10, MinValueWithMaxBase10Digits(100)); EXPECT_EQ(100, MinValueWithMaxBase10Digits(101)); EXPECT_EQ(100, MinValueWithMaxBase10Digits(999)); EXPECT_EQ(100, MinValueWithMaxBase10Digits(1000)); EXPECT_EQ(1000, MinValueWithMaxBase10Digits(1001)); }
#include "quiche/quic/core/http/web_transport_http3.h" #include <limits> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/strings/string_view.h" #include "quiche/quic/core/http/quic_spdy_session.h" #include "quiche/quic/core/http/quic_spdy_stream.h" #include "quiche/quic/core/quic_data_reader.h" #include "quiche/quic/core/quic_data_writer.h" #include "quiche/quic/core/quic_error_codes.h" #include "quiche/quic/core/quic_stream.h" #include "quiche/quic/core/quic_types.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/core/quic_versions.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/common/capsule.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/web_transport/web_transport.h" #define ENDPOINT \ (session_->perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ") namespace quic { namespace { class NoopWebTransportVisitor : public WebTransportVisitor { void OnSessionReady() override {} void OnSessionClosed(WebTransportSessionError , const std::string& ) override {} void OnIncomingBidirectionalStreamAvailable() override {} void OnIncomingUnidirectionalStreamAvailable() override {} void OnDatagramReceived(absl::string_view ) override {} void OnCanCreateNewOutgoingBidirectionalStream() override {} void OnCanCreateNewOutgoingUnidirectionalStream() override {} }; } WebTransportHttp3::WebTransportHttp3(QuicSpdySession* session, QuicSpdyStream* connect_stream, WebTransportSessionId id) : session_(session), connect_stream_(connect_stream), id_(id), visitor_(std::make_unique<NoopWebTransportVisitor>()) { QUICHE_DCHECK(session_->SupportsWebTransport()); QUICHE_DCHECK(IsValidWebTransportSessionId(id, session_->version())); QUICHE_DCHECK_EQ(connect_stream_->id(), id); connect_stream_->RegisterHttp3DatagramVisitor(this); } void WebTransportHttp3::AssociateStream(QuicStreamId stream_id) { streams_.insert(stream_id); ParsedQuicVersion version = session_->version(); if (QuicUtils::IsOutgoingStreamId(version, stream_id, session_->perspective())) { return; } if (QuicUtils::IsBidirectionalStreamId(stream_id, version)) { incoming_bidirectional_streams_.push_back(stream_id); visitor_->OnIncomingBidirectionalStreamAvailable(); } else { incoming_unidirectional_streams_.push_back(stream_id); visitor_->OnIncomingUnidirectionalStreamAvailable(); } } void WebTransportHttp3::OnConnectStreamClosing() { std::vector<QuicStreamId> streams(streams_.begin(), streams_.end()); streams_.clear(); for (QuicStreamId id : streams) { session_->ResetStream(id, QUIC_STREAM_WEBTRANSPORT_SESSION_GONE); } connect_stream_->UnregisterHttp3DatagramVisitor(); MaybeNotifyClose(); } void WebTransportHttp3::CloseSession(WebTransportSessionError error_code, absl::string_view error_message) { if (close_sent_) { QUIC_BUG(WebTransportHttp3 close sent twice) << "Calling WebTransportHttp3::CloseSession() more than once is not " "allowed."; return; } close_sent_ = true; if (close_received_) { QUIC_DLOG(INFO) << "Not sending CLOSE_WEBTRANSPORT_SESSION as we've " "already sent one from peer."; return; } error_code_ = error_code; error_message_ = std::string(error_message); QuicConnection::ScopedPacketFlusher flusher( connect_stream_->spdy_session()->connection()); connect_stream_->WriteCapsule( quiche::Capsule::CloseWebTransportSession(error_code, error_message), true); } void WebTransportHttp3::OnCloseReceived(WebTransportSessionError error_code, absl::string_view error_message) { if (close_received_) { 
QUIC_BUG(WebTransportHttp3 notified of close received twice) << "WebTransportHttp3::OnCloseReceived() may be only called once."; } close_received_ = true; if (close_sent_) { QUIC_DLOG(INFO) << "Ignoring received CLOSE_WEBTRANSPORT_SESSION as we've " "already sent our own."; return; } error_code_ = error_code; error_message_ = std::string(error_message); connect_stream_->WriteOrBufferBody("", true); MaybeNotifyClose(); } void WebTransportHttp3::OnConnectStreamFinReceived() { if (close_received_) { return; } close_received_ = true; if (close_sent_) { QUIC_DLOG(INFO) << "Ignoring received FIN as we've already sent our close."; return; } connect_stream_->WriteOrBufferBody("", true); MaybeNotifyClose(); } void WebTransportHttp3::CloseSessionWithFinOnlyForTests() { QUICHE_DCHECK(!close_sent_); close_sent_ = true; if (close_received_) { return; } connect_stream_->WriteOrBufferBody("", true); } void WebTransportHttp3::HeadersReceived(const spdy::Http2HeaderBlock& headers) { if (session_->perspective() == Perspective::IS_CLIENT) { int status_code; if (!QuicSpdyStream::ParseHeaderStatusCode(headers, &status_code)) { QUIC_DVLOG(1) << ENDPOINT << "Received WebTransport headers from server without " "a valid status code, rejecting."; rejection_reason_ = WebTransportHttp3RejectionReason::kNoStatusCode; return; } bool valid_status = status_code >= 200 && status_code <= 299; if (!valid_status) { QUIC_DVLOG(1) << ENDPOINT << "Received WebTransport headers from server with " "status code " << status_code << ", rejecting."; rejection_reason_ = WebTransportHttp3RejectionReason::kWrongStatusCode; return; } } QUIC_DVLOG(1) << ENDPOINT << "WebTransport session " << id_ << " ready."; ready_ = true; visitor_->OnSessionReady(); session_->ProcessBufferedWebTransportStreamsForSession(this); } WebTransportStream* WebTransportHttp3::AcceptIncomingBidirectionalStream() { while (!incoming_bidirectional_streams_.empty()) { QuicStreamId id = incoming_bidirectional_streams_.front(); incoming_bidirectional_streams_.pop_front(); QuicSpdyStream* stream = session_->GetOrCreateSpdyDataStream(id); if (stream == nullptr) { continue; } return stream->web_transport_stream(); } return nullptr; } WebTransportStream* WebTransportHttp3::AcceptIncomingUnidirectionalStream() { while (!incoming_unidirectional_streams_.empty()) { QuicStreamId id = incoming_unidirectional_streams_.front(); incoming_unidirectional_streams_.pop_front(); QuicStream* stream = session_->GetOrCreateStream(id); if (stream == nullptr) { continue; } return static_cast<WebTransportHttp3UnidirectionalStream*>(stream) ->interface(); } return nullptr; } bool WebTransportHttp3::CanOpenNextOutgoingBidirectionalStream() { return session_->CanOpenOutgoingBidirectionalWebTransportStream(id_); } bool WebTransportHttp3::CanOpenNextOutgoingUnidirectionalStream() { return session_->CanOpenOutgoingUnidirectionalWebTransportStream(id_); } WebTransportStream* WebTransportHttp3::OpenOutgoingBidirectionalStream() { QuicSpdyStream* stream = session_->CreateOutgoingBidirectionalWebTransportStream(this); if (stream == nullptr) { return nullptr; } return stream->web_transport_stream(); } WebTransportStream* WebTransportHttp3::OpenOutgoingUnidirectionalStream() { WebTransportHttp3UnidirectionalStream* stream = session_->CreateOutgoingUnidirectionalWebTransportStream(this); if (stream == nullptr) { return nullptr; } return stream->interface(); } webtransport::Stream* WebTransportHttp3::GetStreamById( webtransport::StreamId id) { if (!streams_.contains(id)) { return nullptr; } QuicStream* 
stream = session_->GetActiveStream(id); const bool bidi = QuicUtils::IsBidirectionalStreamId( id, ParsedQuicVersion::RFCv1()); if (bidi) { return static_cast<QuicSpdyStream*>(stream)->web_transport_stream(); } else { return static_cast<WebTransportHttp3UnidirectionalStream*>(stream) ->interface(); } } webtransport::DatagramStatus WebTransportHttp3::SendOrQueueDatagram( absl::string_view datagram) { return MessageStatusToWebTransportStatus( connect_stream_->SendHttp3Datagram(datagram)); } QuicByteCount WebTransportHttp3::GetMaxDatagramSize() const { return connect_stream_->GetMaxDatagramSize(); } void WebTransportHttp3::SetDatagramMaxTimeInQueue( absl::Duration max_time_in_queue) { connect_stream_->SetMaxDatagramTimeInQueue(QuicTimeDelta(max_time_in_queue)); } void WebTransportHttp3::NotifySessionDraining() { if (!drain_sent_) { connect_stream_->WriteCapsule( quiche::Capsule(quiche::DrainWebTransportSessionCapsule())); drain_sent_ = true; } } void WebTransportHttp3::OnHttp3Datagram(QuicStreamId stream_id, absl::string_view payload) { QUICHE_DCHECK_EQ(stream_id, connect_stream_->id()); visitor_->OnDatagramReceived(payload); } void WebTransportHttp3::MaybeNotifyClose() { if (close_notified_) { return; } close_notified_ = true; visitor_->OnSessionClosed(error_code_, error_message_); } void WebTransportHttp3::OnGoAwayReceived() { if (drain_callback_ != nullptr) { std::move(drain_callback_)(); drain_callback_ = nullptr; } } void WebTransportHttp3::OnDrainSessionReceived() { OnGoAwayReceived(); } WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream( PendingStream* pending, QuicSpdySession* session) : QuicStream(pending, session, false), session_(session), adapter_(session, this, sequencer(), std::nullopt), needs_to_send_preamble_(false) { sequencer()->set_level_triggered(true); } WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream( QuicStreamId id, QuicSpdySession* session, WebTransportSessionId session_id) : QuicStream(id, session, false, WRITE_UNIDIRECTIONAL), session_(session), adapter_(session, this, sequencer(), session_id), session_id_(session_id), needs_to_send_preamble_(true) {} void WebTransportHttp3UnidirectionalStream::WritePreamble() { if (!needs_to_send_preamble_ || !session_id_.has_value()) { QUIC_BUG(WebTransportHttp3UnidirectionalStream duplicate preamble) << ENDPOINT << "Sending preamble on stream ID " << id() << " at the wrong time."; OnUnrecoverableError(QUIC_INTERNAL_ERROR, "Attempting to send a WebTransport unidirectional " "stream preamble at the wrong time."); return; } QuicConnection::ScopedPacketFlusher flusher(session_->connection()); char buffer[sizeof(uint64_t) * 2]; QuicDataWriter writer(sizeof(buffer), buffer); bool success = true; success = success && writer.WriteVarInt62(kWebTransportUnidirectionalStream); success = success && writer.WriteVarInt62(*session_id_); QUICHE_DCHECK(success); WriteOrBufferData(absl::string_view(buffer, writer.length()), false, nullptr); QUIC_DVLOG(1) << ENDPOINT << "Sent stream type and session ID (" << *session_id_ << ") on WebTransport stream " << id(); needs_to_send_preamble_ = false; } bool WebTransportHttp3UnidirectionalStream::ReadSessionId() { iovec iov; if (!sequencer()->GetReadableRegion(&iov)) { return false; } QuicDataReader reader(static_cast<const char*>(iov.iov_base), iov.iov_len); WebTransportSessionId session_id; uint8_t session_id_length = reader.PeekVarInt62Length(); if (!reader.ReadVarInt62(&session_id)) { if (sequencer()->IsAllDataAvailable()) { QUIC_DLOG(WARNING) << 
ENDPOINT << "Failed to associate WebTransport stream " << id() << " with a session because the stream ended prematurely."; sequencer()->MarkConsumed(sequencer()->NumBytesBuffered()); } return false; } sequencer()->MarkConsumed(session_id_length); session_id_ = session_id; adapter_.SetSessionId(session_id); session_->AssociateIncomingWebTransportStreamWithSession(session_id, id()); return true; } void WebTransportHttp3UnidirectionalStream::OnDataAvailable() { if (!session_id_.has_value()) { if (!ReadSessionId()) { return; } } adapter_.OnDataAvailable(); } void WebTransportHttp3UnidirectionalStream::OnCanWriteNewData() { adapter_.OnCanWriteNewData(); } void WebTransportHttp3UnidirectionalStream::OnClose() { QuicStream::OnClose(); if (!session_id_.has_value()) { return; } WebTransportHttp3* session = session_->GetWebTransportSession(*session_id_); if (session == nullptr) { QUIC_DLOG(WARNING) << ENDPOINT << "WebTransport stream " << id() << " attempted to notify parent session " << *session_id_ << ", but the session could not be found."; return; } session->OnStreamClosed(id()); } void WebTransportHttp3UnidirectionalStream::OnStreamReset( const QuicRstStreamFrame& frame) { if (adapter_.visitor() != nullptr) { adapter_.visitor()->OnResetStreamReceived( Http3ErrorToWebTransportOrDefault(frame.ietf_error_code)); } QuicStream::OnStreamReset(frame); } bool WebTransportHttp3UnidirectionalStream::OnStopSending( QuicResetStreamError error) { if (adapter_.visitor() != nullptr) { adapter_.visitor()->OnStopSendingReceived( Http3ErrorToWebTransportOrDefault(error.ietf_application_code())); } return QuicStream::OnStopSending(error); } void WebTransportHttp3UnidirectionalStream::OnWriteSideInDataRecvdState() { if (adapter_.visitor() != nullptr) { adapter_.visitor()->OnWriteSideInDataRecvdState(); } QuicStream::OnWriteSideInDataRecvdState(); } namespace { constexpr uint64_t kWebTransportMappedErrorCodeFirst = 0x52e4a40fa8db; constexpr uint64_t kWebTransportMappedErrorCodeLast = 0x52e5ac983162; constexpr WebTransportStreamError kDefaultWebTransportError = 0; } std::optional<WebTransportStreamError> Http3ErrorToWebTransport( uint64_t http3_error_code) { if (http3_error_code < kWebTransportMappedErrorCodeFirst || http3_error_code > kWebTransportMappedErrorCodeLast) { return std::nullopt; } if ((http3_error_code - 0x21) % 0x1f == 0) { return std::nullopt; } uint64_t shifted = http3_error_code - kWebTransportMappedErrorCodeFirst; uint64_t result = shifted - shifted / 0x1f; QUICHE_DCHECK_LE(result, std::numeric_limits<webtransport::StreamErrorCode>::max()); return static_cast<WebTransportStreamError>(result); } WebTransportStreamError Http3ErrorToWebTransportOrDefault( uint64_t http3_error_code) { std::optional<WebTransportStreamError> result = Http3ErrorToWebTransport(http3_error_code); return result.has_value() ? *result : kDefaultWebTransportError; } uint64_t WebTransportErrorToHttp3( WebTransportStreamError webtransport_error_code) { return kWebTransportMappedErrorCodeFirst + webtransport_error_code + webtransport_error_code / 0x1e; } }
#include "quiche/quic/core/http/web_transport_http3.h" #include <cstdint> #include <limits> #include <optional> #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace { using ::testing::Optional; TEST(WebTransportHttp3Test, ErrorCodesToHttp3) { EXPECT_EQ(0x52e4a40fa8dbu, WebTransportErrorToHttp3(0x00)); EXPECT_EQ(0x52e4a40fa9e2u, WebTransportErrorToHttp3(0xff)); EXPECT_EQ(0x52e5ac983162u, WebTransportErrorToHttp3(0xffffffff)); EXPECT_EQ(0x52e4a40fa8f7u, WebTransportErrorToHttp3(0x1c)); EXPECT_EQ(0x52e4a40fa8f8u, WebTransportErrorToHttp3(0x1d)); EXPECT_EQ(0x52e4a40fa8fau, WebTransportErrorToHttp3(0x1e)); } TEST(WebTransportHttp3Test, ErrorCodesToWebTransport) { EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8db), Optional(0x00)); EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa9e2), Optional(0xff)); EXPECT_THAT(Http3ErrorToWebTransport(0x52e5ac983162u), Optional(0xffffffff)); EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f7), Optional(0x1cu)); EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f8), Optional(0x1du)); EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f9), std::nullopt); EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8fa), Optional(0x1eu)); EXPECT_EQ(Http3ErrorToWebTransport(0), std::nullopt); EXPECT_EQ(Http3ErrorToWebTransport(std::numeric_limits<uint64_t>::max()), std::nullopt); } TEST(WebTransportHttp3Test, ErrorCodeRoundTrip) { for (int error = 0; error <= 65536; error++) { uint64_t http_error = WebTransportErrorToHttp3(error); std::optional<WebTransportStreamError> mapped_back = quic::Http3ErrorToWebTransport(http_error); ASSERT_THAT(mapped_back, Optional(error)); } for (int64_t error = 0; error < std::numeric_limits<uint32_t>::max(); error += 65537) { uint64_t http_error = WebTransportErrorToHttp3(error); std::optional<WebTransportStreamError> mapped_back = quic::Http3ErrorToWebTransport(http_error); ASSERT_THAT(mapped_back, Optional(error)); } } } }
uint64_t WebTransportErrorToHttp3( WebTransportStreamError webtransport_error_code) { return kWebTransportMappedErrorCodeFirst + webtransport_error_code + webtransport_error_code / 0x1e; }
TEST(WebTransportHttp3Test, ErrorCodesToHttp3) { EXPECT_EQ(0x52e4a40fa8dbu, WebTransportErrorToHttp3(0x00)); EXPECT_EQ(0x52e4a40fa9e2u, WebTransportErrorToHttp3(0xff)); EXPECT_EQ(0x52e5ac983162u, WebTransportErrorToHttp3(0xffffffff)); EXPECT_EQ(0x52e4a40fa8f7u, WebTransportErrorToHttp3(0x1c)); EXPECT_EQ(0x52e4a40fa8f8u, WebTransportErrorToHttp3(0x1d)); EXPECT_EQ(0x52e4a40fa8fau, WebTransportErrorToHttp3(0x1e)); } TEST(WebTransportHttp3Test, ErrorCodeRoundTrip) { for (int error = 0; error <= 65536; error++) { uint64_t http_error = WebTransportErrorToHttp3(error); std::optional<WebTransportStreamError> mapped_back = quic::Http3ErrorToWebTransport(http_error); ASSERT_THAT(mapped_back, Optional(error)); } for (int64_t error = 0; error < std::numeric_limits<uint32_t>::max(); error += 65537) { uint64_t http_error = WebTransportErrorToHttp3(error); std::optional<WebTransportStreamError> mapped_back = quic::Http3ErrorToWebTransport(http_error); ASSERT_THAT(mapped_back, Optional(error)); } }
#include "quiche/quic/core/crypto/null_decrypter.h" #include <cstdint> #include <limits> #include <string> #include "absl/numeric/int128.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/quic_data_reader.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/common/quiche_endian.h" namespace quic { NullDecrypter::NullDecrypter(Perspective perspective) : perspective_(perspective) {} bool NullDecrypter::SetKey(absl::string_view key) { return key.empty(); } bool NullDecrypter::SetNoncePrefix(absl::string_view nonce_prefix) { return nonce_prefix.empty(); } bool NullDecrypter::SetIV(absl::string_view iv) { return iv.empty(); } bool NullDecrypter::SetHeaderProtectionKey(absl::string_view key) { return key.empty(); } bool NullDecrypter::SetPreliminaryKey(absl::string_view ) { QUIC_BUG(quic_bug_10652_1) << "Should not be called"; return false; } bool NullDecrypter::SetDiversificationNonce( const DiversificationNonce& ) { QUIC_BUG(quic_bug_10652_2) << "Should not be called"; return true; } bool NullDecrypter::DecryptPacket(uint64_t , absl::string_view associated_data, absl::string_view ciphertext, char* output, size_t* output_length, size_t max_output_length) { QuicDataReader reader(ciphertext.data(), ciphertext.length(), quiche::HOST_BYTE_ORDER); absl::uint128 hash; if (!ReadHash(&reader, &hash)) { return false; } absl::string_view plaintext = reader.ReadRemainingPayload(); if (plaintext.length() > max_output_length) { QUIC_BUG(quic_bug_10652_3) << "Output buffer must be larger than the plaintext."; return false; } if (hash != ComputeHash(associated_data, plaintext)) { return false; } memcpy(output, plaintext.data(), plaintext.length()); *output_length = plaintext.length(); return true; } std::string NullDecrypter::GenerateHeaderProtectionMask( QuicDataReader* ) { return std::string(5, 0); } size_t NullDecrypter::GetKeySize() const { return 0; } size_t NullDecrypter::GetNoncePrefixSize() const { return 0; } size_t NullDecrypter::GetIVSize() const { return 0; } absl::string_view NullDecrypter::GetKey() const { return absl::string_view(); } absl::string_view NullDecrypter::GetNoncePrefix() const { return absl::string_view(); } uint32_t NullDecrypter::cipher_id() const { return 0; } QuicPacketCount NullDecrypter::GetIntegrityLimit() const { return std::numeric_limits<QuicPacketCount>::max(); } bool NullDecrypter::ReadHash(QuicDataReader* reader, absl::uint128* hash) { uint64_t lo; uint32_t hi; if (!reader->ReadUInt64(&lo) || !reader->ReadUInt32(&hi)) { return false; } *hash = absl::MakeUint128(hi, lo); return true; } absl::uint128 NullDecrypter::ComputeHash(const absl::string_view data1, const absl::string_view data2) const { absl::uint128 correct_hash; if (perspective_ == Perspective::IS_CLIENT) { correct_hash = QuicUtils::FNV1a_128_Hash_Three(data1, data2, "Server"); } else { correct_hash = QuicUtils::FNV1a_128_Hash_Three(data1, data2, "Client"); } absl::uint128 mask = absl::MakeUint128(UINT64_C(0x0), UINT64_C(0xffffffff)); mask <<= 96; correct_hash &= ~mask; return correct_hash; } }
#include "quiche/quic/core/crypto/null_decrypter.h" #include "absl/base/macros.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_test_utils.h" namespace quic { namespace test { class NullDecrypterTest : public QuicTestWithParam<bool> {}; TEST_F(NullDecrypterTest, DecryptClient) { unsigned char expected[] = { 0x97, 0xdc, 0x27, 0x2f, 0x18, 0xa8, 0x56, 0x73, 0xdf, 0x8d, 0x1d, 0xd0, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_SERVER); char buffer[256]; size_t length = 0; ASSERT_TRUE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); EXPECT_LT(0u, length); EXPECT_EQ("goodbye!", absl::string_view(buffer, length)); } TEST_F(NullDecrypterTest, DecryptServer) { unsigned char expected[] = { 0x63, 0x5e, 0x08, 0x03, 0x32, 0x80, 0x8f, 0x73, 0xdf, 0x8d, 0x1d, 0x1a, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_TRUE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); EXPECT_LT(0u, length); EXPECT_EQ("goodbye!", absl::string_view(buffer, length)); } TEST_F(NullDecrypterTest, BadHash) { unsigned char expected[] = { 0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc, 0xfd, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_FALSE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); } TEST_F(NullDecrypterTest, ShortInput) { unsigned char expected[] = { 0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc, }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_FALSE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); } } }
bool NullDecrypter::ReadHash(QuicDataReader* reader, absl::uint128* hash) { uint64_t lo; uint32_t hi; if (!reader->ReadUInt64(&lo) || !reader->ReadUInt32(&hi)) { return false; } *hash = absl::MakeUint128(hi, lo); return true; }
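A minimal illustration of the reassembly this helper performs: the low 64 bits arrive first, the high 32 bits second, and MakeUint128(hi, lo) places them into one value with at most 96 significant bits. Sketched here with __int128 instead of absl::uint128:

#include <cassert>
#include <cstdint>

using uint128 = unsigned __int128;

// Mirrors absl::MakeUint128(hi, lo): hi occupies the upper 64 bits.
uint128 MakeUint128(uint64_t hi, uint64_t lo) {
  return (uint128{hi} << 64) | lo;
}

int main() {
  uint64_t lo = 0x1122334455667788;  // read first (ReadUInt64)
  uint32_t hi = 0x99aabbcc;          // read second (ReadUInt32)
  uint128 hash = MakeUint128(hi, lo);
  assert(static_cast<uint64_t>(hash) == lo);
  assert(static_cast<uint64_t>(hash >> 64) == hi);
  assert((hash >> 96) == 0);  // only 96 significant bits, matching the mask
}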
TEST_F(NullDecrypterTest, DecryptClient) { unsigned char expected[] = { 0x97, 0xdc, 0x27, 0x2f, 0x18, 0xa8, 0x56, 0x73, 0xdf, 0x8d, 0x1d, 0xd0, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_SERVER); char buffer[256]; size_t length = 0; ASSERT_TRUE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); EXPECT_LT(0u, length); EXPECT_EQ("goodbye!", absl::string_view(buffer, length)); } TEST_F(NullDecrypterTest, DecryptServer) { unsigned char expected[] = { 0x63, 0x5e, 0x08, 0x03, 0x32, 0x80, 0x8f, 0x73, 0xdf, 0x8d, 0x1d, 0x1a, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_TRUE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); EXPECT_LT(0u, length); EXPECT_EQ("goodbye!", absl::string_view(buffer, length)); } TEST_F(NullDecrypterTest, BadHash) { unsigned char expected[] = { 0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc, 0xfd, 'g', 'o', 'o', 'd', 'b', 'y', 'e', '!', }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_FALSE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); } TEST_F(NullDecrypterTest, ShortInput) { unsigned char expected[] = { 0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc, }; const char* data = reinterpret_cast<const char*>(expected); size_t len = ABSL_ARRAYSIZE(expected); NullDecrypter decrypter(Perspective::IS_CLIENT); char buffer[256]; size_t length = 0; ASSERT_FALSE(decrypter.DecryptPacket( 0, "hello world!", absl::string_view(data, len), buffer, &length, 256)); }
#include "arolla/codegen/expr/codegen_operator.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <functional> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "arolla/algorithm/control_flow_graph.h" #include "arolla/codegen/expr/optimizations.h" #include "arolla/codegen/expr/types.h" #include "arolla/expr/annotation_utils.h" #include "arolla/expr/basic_expr_operator.h" #include "arolla/expr/derived_qtype_cast_operator.h" #include "arolla/expr/eval/eval.h" #include "arolla/expr/eval/prepare_expression.h" #include "arolla/expr/eval/side_output.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_debug_string.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/expr_visitor.h" #include "arolla/expr/registered_expr_operator.h" #include "arolla/qexpr/operator_metadata.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/derived_qtype.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/util/bytes.h" #include "arolla/util/fast_dynamic_downcast_final.h" #include "arolla/util/fingerprint.h" #include "arolla/util/map.h" #include "arolla/util/text.h" #include "arolla/util/status_macros_backport.h" ABSL_FLAG(int64_t, arolla_codegen_min_local_variables_per_lambda, 50, R"""( Minimum number of local variables required in order to create lambda. There are several things to consider for tuning this parameter. 1. maximum depth of braces is limited in C++, so we shouldn't create too deep structure. 2. C++ compiler can be not good in optimizing too many lambda functions. 3. On other hand smaller number can eliminate stack usage more. 4. It is not clear whenever compiler can successfully reuse stack memory for several variables with the same type. )"""); ABSL_FLAG(int64_t, arolla_codegen_max_allowed_inline_depth, 50, R"""( Maximim depth in inlining function calls that used only once. There are several things to consider for tuning this parameter. 1. Inlining may help compiler to optimize better and take advantage of temporary variables, save stack pressure. 2. Inlining making code slightly more readable. 3. maximum depth of braces is limited in C++, so we shouldn't create too deep structure. 
)"""); namespace arolla::codegen { namespace codegen_impl { bool IsInlinableLiteralType(const QType* qtype) { auto is_primitive_type = [](const QType* type) { return IsScalarQType(type) && type != GetQType<Text>() && type != GetQType<Bytes>(); }; return qtype != nullptr && is_primitive_type(DecayOptionalQType(qtype)); } } namespace { using expr::BackendExprOperatorTag; using expr::DecayRegisteredOperator; using expr::ExprNodePtr; using expr::ExprNodeType; using expr::ExprOperatorPtr; using expr::ExprOperatorSignature; using expr::UnnamedExprOperator; using expr::eval_internal::InternalRootOperator; using NodeId = AcyclicCFG::NodeId; class InternalNamedOutputExportOperator final : public UnnamedExprOperator { public: explicit InternalNamedOutputExportOperator(int64_t export_id) : UnnamedExprOperator( ExprOperatorSignature({{"x"}}), FingerprintHasher("codegen::InternalNamedOutputExportOperator") .Combine(export_id) .Finish()), export_id_(export_id) {} absl::StatusOr<QTypePtr> GetOutputQType( absl::Span<const QTypePtr> input_qtypes) const final { return input_qtypes[0]; } int64_t ExportId() const { return export_id_; } private: int64_t export_id_; }; std::optional<int64_t> MaybeGetExportId(const ExprNodePtr& node) { if (auto* export_op = fast_dynamic_downcast_final<const InternalNamedOutputExportOperator*>( node->op().get())) { return export_op->ExportId(); } return std::nullopt; } absl::StatusOr<std::vector<QTypePtr>> DependencyTypes( const ExprNodePtr& node, std::function<absl::StatusOr<QTypePtr>(const ExprNodePtr&)> qtype_from_expr_fn) { std::vector<QTypePtr> result; result.reserve(node->node_deps().size()); for (const ExprNodePtr& dep : node->node_deps()) { ASSIGN_OR_RETURN(result.emplace_back(), qtype_from_expr_fn(dep)); } return result; } absl::StatusOr<std::optional<QExprOperatorMetadata>> GetOperatorMetadata( const QExprOperatorMetadataRegistry& op_registry, const ExprNodePtr& node, std::function<absl::StatusOr<QTypePtr>(const ExprNodePtr&)> qtype_from_expr_fn) { ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op())); if (op == InternalRootOperator()) { return std::nullopt; } if (expr::IsQTypeAnnotation(node)) { return std::nullopt; } if (auto export_id_opt = MaybeGetExportId(node); export_id_opt.has_value()) { return std::nullopt; } if (typeid(*op) == typeid(expr::DerivedQTypeUpcastOperator) || typeid(*op) == typeid(expr::DerivedQTypeDowncastOperator)) { return std::nullopt; } if (dynamic_cast<const BackendExprOperatorTag*>(op.get()) == nullptr) { return absl::InvalidArgumentError(absl::StrCat( node->op()->display_name(), " is not a backend ExprOperator")); } ASSIGN_OR_RETURN(auto dependency_types, DependencyTypes(node, qtype_from_expr_fn)); ASSIGN_OR_RETURN( auto metadata, op_registry.LookupOperatorMetadata(op->display_name(), dependency_types), _ << "while processing: " << expr::GetDebugSnippet(node)); return {metadata}; } absl::StatusOr<std::pair<std::unique_ptr<AcyclicCFG>, std::vector<ExprNodePtr>>> BuildEvalCfg(const ExprNodePtr& entry_node) { auto nodes_order = expr::VisitorOrder(entry_node); std::reverse(nodes_order.begin(), nodes_order.end()); absl::flat_hash_map<Fingerprint, NodeId> node_id; node_id.reserve(nodes_order.size()); for (const auto& node : nodes_order) { NodeId id = node_id.size(); node_id[node->fingerprint()] = id; } std::vector<std::vector<NodeId>> deps; deps.reserve(nodes_order.size()); for (const auto& node : nodes_order) { std::vector<NodeId> cur_deps; cur_deps.reserve(node->node_deps().size()); for (const auto& dep : node->node_deps()) { 
cur_deps.push_back(node_id[dep->fingerprint()]); } deps.push_back(std::move(cur_deps)); } ASSIGN_OR_RETURN(auto graph, AcyclicCFG::Create(std::move(deps))); return {std::pair{std::move(graph), std::move(nodes_order)}}; } std::vector<bool> FindInlinableNodes(const AcyclicCFG& graph) { std::vector<bool> inlinable(graph.num_nodes(), false); std::vector<size_t> inline_depth(graph.num_nodes(), 0); for (NodeId node_id = graph.num_nodes() - 1; node_id > 0; --node_id) { bool used_once = graph.reverse_deps(node_id).size() == 1; if (used_once) { size_t max_inline_depth = 0; for (NodeId dep : graph.deps(node_id)) { max_inline_depth = std::max(max_inline_depth, inline_depth[dep]); } if (max_inline_depth < absl::GetFlag(FLAGS_arolla_codegen_max_allowed_inline_depth)) { inlinable[node_id] = true; inline_depth[node_id] = max_inline_depth + 1; } } } inlinable[0] = true; return inlinable; } class Codegen { public: Codegen(const QExprOperatorMetadataRegistry& op_registry, const AcyclicCFG& graph, std::vector<ExprNodePtr> exprs, absl::flat_hash_map<Fingerprint, QTypePtr> node_qtypes, std::vector<std::string> side_output_names, bool inputs_are_cheap_to_read) : op_registry_(op_registry), graph_(graph), dominator_tree_(graph_), exprs_(std::move(exprs)), node_qtypes_(std::move(node_qtypes)), side_output_names_(std::move(side_output_names)), inputs_are_cheap_to_read_(inputs_are_cheap_to_read) {} absl::StatusOr<OperatorCodegenData> Process() { std::vector<bool> inlinable = FindInlinableNodes(graph_); OperatorCodegenData data; data.side_outputs.reserve(side_output_names_.size()); for (const auto& name : side_output_names_) { data.side_outputs.emplace_back(name, -1); } for (NodeId node_id = graph_.num_nodes() - 1; node_id >= 0; --node_id) { RETURN_IF_ERROR(ProcessSingleNode(node_id, inlinable[node_id], &data)); } for (const auto& [name, assignment_id] : data.side_outputs) { if (assignment_id == -1) { return absl::InternalError(absl::StrFormat( "named output `%s` is lost in transformations", name)); } } ASSIGN_OR_RETURN(data.functions, SplitOnFunctions(data)); FilterArgumentsAsFunction(data); LambdifyFunctions(data); ComputeLocalExprStatus(data); data.output_id = ToAssignmentId(0); return data; } private: absl::StatusOr<QTypePtr> QTypeFromExpr(const ExprNodePtr& node) const { DCHECK(node_qtypes_.contains(node->fingerprint())); auto qtype = node_qtypes_.at(node->fingerprint()); if (qtype == nullptr) { return absl::FailedPreconditionError(absl::StrFormat( "unable to deduce QType for %s", expr::ToDebugString(node))); } return qtype; } LValueId ToAssignmentId(NodeId node_id) const { return graph_.num_nodes() - node_id - 1; } NodeId ToNodeId(LValueId assignment_id) const { return graph_.num_nodes() - assignment_id - 1; } bool IsLiteralNode(NodeId node_id) const { return exprs_[node_id]->is_literal(); } bool IsLeafNode(NodeId node_id) const { return exprs_[node_id]->is_leaf(); } absl::StatusOr<std::vector<bool>> FindSeparableNodes() const { int64_t n = graph_.num_nodes(); absl::flat_hash_set<NodeId> global_nodes; for (int64_t node_id = 0; node_id != n; ++node_id) { if (IsLiteralNode(node_id) || (inputs_are_cheap_to_read_ && IsLeafNode(node_id))) { global_nodes.insert(node_id); } } ASSIGN_OR_RETURN(auto externalized_graph, ExternalizeNodes(graph_, dominator_tree_, global_nodes)); auto is_separable = FindVerticesWithEmptyDominanceFrontier( *externalized_graph, dominator_tree_); for (NodeId node_id = 0; node_id != n; ++node_id) { if (IsLiteralNode(node_id) || IsLeafNode(node_id)) { is_separable[node_id] = false; } } return 
is_separable; } absl::StatusOr<std::vector<Function>> SplitOnFunctions( OperatorCodegenData& data) const { int64_t n = graph_.num_nodes(); ASSIGN_OR_RETURN(auto is_separable, FindSeparableNodes()); CHECK(is_separable[0] || IsLiteralNode(0) || IsLeafNode(0)) << "InternalError: entry node should be always separable"; std::vector<Function> functions; constexpr int64_t kUndefined = -1; std::vector<int64_t> function_id(n, kUndefined); for (NodeId node_id = n - 1; node_id >= 0; --node_id) { if (is_separable[node_id]) { function_id[node_id] = functions.size(); Function new_fn; new_fn.output_id = ToAssignmentId(node_id); new_fn.is_result_status_or = data.assignments[new_fn.output_id] .lvalue() .is_entire_expr_status_or; functions.push_back(std::move(new_fn)); } } CHECK((function_id[0] != kUndefined) || IsLiteralNode(0) || IsLeafNode(0)) << "InternalError: entry node should be assigned to the function"; for (NodeId node_id = 0; node_id != n; ++node_id) { for (NodeId dep : graph_.deps(node_id)) { if (function_id[dep] == kUndefined) { function_id[dep] = function_id[node_id]; } } } for (NodeId node_id = n - 1; node_id >= 0; --node_id) { LValueId assignment_id = ToAssignmentId(node_id); int64_t cur_function_id = function_id[node_id]; if (IsLiteralNode(node_id)) { continue; } if ((inputs_are_cheap_to_read_ || node_id == 0) && IsLeafNode(node_id)) { continue; } if (!is_separable[node_id]) { functions[cur_function_id].assignment_ids.push_back(assignment_id); for (NodeId rdep : graph_.reverse_deps(node_id)) { CHECK_EQ(function_id[rdep], cur_function_id) << "InternalError: only separable nodes can be used by other " "functions"; } continue; } int64_t rdep_function = kUndefined; for (NodeId rdep : graph_.reverse_deps(node_id)) { if (function_id[rdep] != cur_function_id) { if (rdep_function == kUndefined) { rdep_function = function_id[rdep]; functions[rdep_function].assignment_ids.push_back(assignment_id); } else { CHECK_EQ(rdep_function, function_id[rdep]) << "InternalError: non leaf function node must be used by not " "more than one other function"; } } } } return functions; } void LambdifyFunctions(OperatorCodegenData& data) const { for (Function& function : data.functions) { LambdifyFunction(data, function); } } void ComputeLocalExprStatus(OperatorCodegenData& data) const { absl::flat_hash_map<LValueId, int64_t> id2lambda; for (int64_t i = 0; i < data.lambdas.size(); ++i) { id2lambda.emplace(data.lambdas[i].output_id, i); } absl::flat_hash_map<LValueId, int64_t> id2function; for (int64_t i = 0; i < data.functions.size(); ++i) { id2function.emplace(data.functions[i].output_id, i); } for (LValueId assignment_id = 0; assignment_id != data.assignments.size(); ++assignment_id) { auto& assignment = data.assignments[assignment_id]; bool is_local_expr_status_or = assignment.rvalue().operator_returns_status_or; if (id2function.contains(assignment_id)) { is_local_expr_status_or = data.functions[id2function[assignment_id]].is_result_status_or; } else { std::vector<LValueId> output_assignments = DependencyArgs(ToNodeId(assignment_id)); for (LValueId dep_id : output_assignments) { is_local_expr_status_or = is_local_expr_status_or || (data.assignments[dep_id].is_inlinable() && data.assignments[dep_id].lvalue().is_local_expr_status_or); } if (id2lambda.contains(assignment_id)) { Function& lambda = data.lambdas[id2lambda[assignment_id]]; for (LValueId assignment_id : lambda.assignment_ids) { is_local_expr_status_or |= data.assignments[assignment_id] .lvalue() .is_local_expr_status_or; } lambda.is_result_status_or = 
is_local_expr_status_or; } } assignment.lvalue().is_local_expr_status_or = is_local_expr_status_or; } } void FilterArgumentsAsFunction(OperatorCodegenData& data) const { for (Assignment& assignment : data.assignments) { RValue& rvalue = assignment.rvalue(); if (rvalue.kind != RValueKind::kFunctionCall && rvalue.kind != RValueKind::kFunctionWithContextCall) { continue; } if (rvalue.argument_as_function_offsets.empty()) { continue; } auto new_end = std::remove_if( rvalue.argument_as_function_offsets.begin(), rvalue.argument_as_function_offsets.end(), [&](int offset) { const Assignment& cur_assignment = data.assignments[rvalue.argument_ids[offset]]; return !cur_assignment.is_inlinable() || cur_assignment.lvalue().kind == LValueKind::kLiteral; }); rvalue.argument_as_function_offsets.erase( new_end, rvalue.argument_as_function_offsets.end()); } } bool IsInlinableAsFunctionArgument(LValueId assignment_id, const OperatorCodegenData& data) const { auto& cur_assignment = data.assignments[assignment_id]; if (cur_assignment.lvalue().kind == LValueKind::kLiteral) { return false; } if (!cur_assignment.is_inlinable()) { return false; } NodeId dominator_node_id = dominator_tree_.parent(ToNodeId(assignment_id)); LValueId dominator_assignment_id = ToAssignmentId(dominator_node_id); auto& parent_assignment = data.assignments[dominator_assignment_id]; const std::vector<LValueId>& parent_arg_ids = parent_assignment.rvalue().argument_ids; int arg_in_parent_id = std::find(parent_arg_ids.begin(), parent_arg_ids.end(), assignment_id) - parent_arg_ids.begin(); const std::vector<int>& argument_as_function_offsets = parent_assignment.rvalue().argument_as_function_offsets; return std::count(argument_as_function_offsets.begin(), argument_as_function_offsets.end(), arg_in_parent_id) != 0; } void LambdifyFunction(OperatorCodegenData& data, Function& function) const { absl::flat_hash_map<int64_t, std::vector<LValueId>> lambda_local_assignments; for (LValueId assignment_id : function.assignment_ids) { auto& cur_assignment = data.assignments[assignment_id]; NodeId node_id = ToNodeId(assignment_id); NodeId dominator_node_id = dominator_tree_.parent(node_id); LValueId dominator_assignment_id = ToAssignmentId(dominator_node_id); auto cur_lambda_assignments = std::move(lambda_local_assignments[assignment_id]); auto& dominator_assignments = lambda_local_assignments[dominator_assignment_id]; bool enough_assignments_for_lambda = cur_lambda_assignments.size() > absl::GetFlag(FLAGS_arolla_codegen_min_local_variables_per_lambda); bool as_function_argument = IsInlinableAsFunctionArgument(assignment_id, data); if (enough_assignments_for_lambda || (as_function_argument && !cur_lambda_assignments.empty())) { data.lambdas.push_back( Function{.assignment_ids = std::move(cur_lambda_assignments), .output_id = assignment_id, .is_result_status_or = false}); cur_assignment.set_inlinable(as_function_argument); } else { dominator_assignments.insert(dominator_assignments.end(), cur_lambda_assignments.begin(), cur_lambda_assignments.end()); } if (!cur_assignment.is_inlinable()) { dominator_assignments.push_back(assignment_id); } } function.assignment_ids = std::move(lambda_local_assignments[function.output_id]); } std::vector<LValueId> DependencyArgs(NodeId node_id) const { const auto deps = graph_.deps(node_id); std::vector<LValueId> deps_vector = std::vector(deps.begin(), deps.end()); for (NodeId& id : deps_vector) { id = ToAssignmentId(id); } return deps_vector; } auto DependencyTypes(NodeId node_id, const OperatorCodegenData& out_data) const { 
std::vector<QTypePtr> result; result.reserve(graph_.deps(node_id).size()); for (NodeId dep : DependencyArgs(node_id)) { result.push_back(out_data.assignments[dep].lvalue().qtype); } return result; } absl::Status ProcessInternalRootOperator(NodeId node_id, bool inlinable, OperatorCodegenData* out_data) { if (node_id != 0) { return absl::InternalError( "InternalRootOperator can be only in the first node"); } const auto& node = exprs_[node_id]; ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node)); std::string type_name = CppTypeName(qtype).value_or("auto"); std::vector<LValueId> output_assignments = DependencyArgs(node_id); bool is_entire_expr_status_or = false; for (LValueId dep_id : output_assignments) { is_entire_expr_status_or = is_entire_expr_status_or || out_data->assignments[dep_id].lvalue().is_entire_expr_status_or; } out_data->assignments.push_back( Assignment{LValue{.type_name = type_name, .is_entire_expr_status_or = is_entire_expr_status_or, .qtype = qtype, .kind = LValueKind::kLocal}, RValue{.kind = RValueKind::kFirst, .operator_returns_status_or = false, .code = "", .argument_ids = output_assignments}, inlinable}); if (output_assignments.size() < 2) { return absl::InternalError( absl::StrFormat("InternalRootOperator must have at least 2 arguments" ", found: %d", output_assignments.size())); } return absl::OkStatus(); } absl::Status ProcessInternalNamedOutputExportOperator( NodeId node_id, int64_t export_id, bool inlinable, OperatorCodegenData* out_data) { const auto& node = exprs_[node_id]; ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node)); std::string type_name = CppTypeName(qtype).value_or("auto"); std::vector<LValueId> output_assignments = DependencyArgs(node_id); if (output_assignments.size() != 1) { return absl::InternalError( "InternalNamedOutputExportOperator must have 1 argument"); } out_data->assignments.push_back( Assignment{LValue{.type_name = type_name, .is_entire_expr_status_or = out_data->assignments[output_assignments[0]] .lvalue() .is_entire_expr_status_or, .qtype = qtype, .kind = LValueKind::kLocal}, RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = std::to_string(export_id), .argument_ids = output_assignments}, inlinable}); if (export_id < 0 || export_id >= side_output_names_.size()) { return absl::InternalError( absl::StrFormat("export_id is out of range: %d", export_id)); } out_data->side_outputs[export_id].second = ToAssignmentId(node_id); return absl::OkStatus(); } absl::Status ProcessDerivedQTypeCastOperator(NodeId node_id, bool inlinable, OperatorCodegenData* out_data) { const auto& node = exprs_[node_id]; ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node)); qtype = DecayDerivedQType(qtype); std::string type_name = CppTypeName(qtype).value_or("auto"); std::vector<LValueId> output_assignments = DependencyArgs(node_id); if (output_assignments.size() != 1) { return absl::InternalError( "DerivedQTypeCastOperator must have 1 argument"); } out_data->assignments.push_back( Assignment{LValue{.type_name = type_name, .is_entire_expr_status_or = out_data->assignments[output_assignments[0]] .lvalue() .is_entire_expr_status_or, .qtype = qtype, .kind = LValueKind::kLocal}, RValue{.kind = RValueKind::kFirst, .operator_returns_status_or = false, .code = "", .argument_ids = output_assignments}, inlinable}); return absl::OkStatus(); } absl::Status ProcessSingleNode(NodeId node_id, bool inlinable, OperatorCodegenData* out_data) { const auto& node = exprs_[node_id]; ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node)); std::string 
type_name = CppTypeName(qtype).value_or("auto"); switch (node->type()) { case ExprNodeType::kLeaf: { if (type_name == "auto") { return absl::FailedPreconditionError( absl::StrFormat("CppTypeName must be implemented for all inputs. " "Leaf: %s; QType: %s", node->leaf_key(), qtype->name())); } out_data->inputs[node->leaf_key()] = ToAssignmentId(node_id); out_data->assignments.push_back( Assignment{LValue{.type_name = type_name, .is_entire_expr_status_or = false, .qtype = qtype, .kind = LValueKind::kInput}, RValue::CreateInput(), inputs_are_cheap_to_read_ || inlinable}); return absl::OkStatus(); } case ExprNodeType::kPlaceholder: { return absl::FailedPreconditionError( absl::StrFormat("operator generation doesn't support placeholders: " "P.%s found", node->placeholder_key())); } case ExprNodeType::kLiteral: { auto value = node->qvalue(); DCHECK(value); ASSIGN_OR_RETURN(std::string value_repr, CppLiteralRepr(*value)); out_data->assignments.push_back( Assignment{LValue{.type_name = type_name, .is_entire_expr_status_or = false, .qtype = qtype, .kind = LValueKind::kLiteral}, RValue::CreateLiteral(value_repr), codegen_impl::IsInlinableLiteralType(value->GetType())}); return absl::OkStatus(); } case ExprNodeType::kOperator: { ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op())); if (op == InternalRootOperator()) { return ProcessInternalRootOperator(node_id, inlinable, out_data); } if (auto export_id_opt = MaybeGetExportId(node); export_id_opt.has_value()) { return ProcessInternalNamedOutputExportOperator( node_id, *export_id_opt, inlinable, out_data); } if (typeid(*op) == typeid(expr::DerivedQTypeUpcastOperator) || typeid(*op) == typeid(expr::DerivedQTypeDowncastOperator)) { return ProcessDerivedQTypeCastOperator(node_id, inlinable, out_data); } if (dynamic_cast<const BackendExprOperatorTag*>(op.get()) == nullptr) { return absl::InvalidArgumentError(absl::StrCat( node->op()->display_name(), " is not a backend ExprOperator")); } std::string type_name = CppTypeName(qtype).value_or("auto"); ASSIGN_OR_RETURN(std::optional<QExprOperatorMetadata> op_metadata, GetOperatorMetadata(op_registry_, node, [&](const ExprNodePtr& node) { return this->QTypeFromExpr(node); })); if (!op_metadata.has_value()) { return absl::InternalError(absl::StrCat(node->op()->d
#include "arolla/codegen/expr/codegen_operator.h" #include <cstdint> #include <initializer_list> #include <set> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/statusor.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/lambda_expr_operator.h" #include "arolla/expr/testing/testing.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/weak_qtype.h" #include "arolla/util/bytes.h" #include "arolla/util/init_arolla.h" #include "arolla/util/text.h" #include "arolla/util/unit.h" namespace arolla::codegen { namespace { using ::arolla::expr::Leaf; using ::arolla::expr::Literal; using ::arolla::testing::WithExportAnnotation; using ::arolla::testing::WithQTypeAnnotation; using ::testing::_; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; int64_t MinUnused(std::set<int64_t> used) { for (int64_t i = 0; i != used.size(); ++i) { if (used.count(i) == 0) { return i; } } return used.size(); } class CodegenTest : public ::testing::Test { void SetUp() override { ASSERT_OK(InitArolla()); } }; TEST_F(CodegenTest, IsInlinableLiteralTypeTest) { EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<int>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<float>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<double>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<int64_t>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<uint64_t>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<bool>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<Unit>())); EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetQType<Bytes>())); EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetQType<Text>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<int>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<float>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<double>())); EXPECT_TRUE( codegen_impl::IsInlinableLiteralType(GetOptionalQType<int64_t>())); EXPECT_TRUE( codegen_impl::IsInlinableLiteralType(GetOptionalQType<uint64_t>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<bool>())); EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Unit>())); EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Bytes>())); EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Text>())); EXPECT_FALSE( codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<bool>())); EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<int>())); EXPECT_FALSE( codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<float>())); EXPECT_FALSE( codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<double>())); } TEST_F(CodegenTest, SmokeTest) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("math.add", {expr::CallOp("math.add", {WithQTypeAnnotation( Leaf("x"), GetQType<float>()), Literal(1.f)}), WithQTypeAnnotation(Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre( "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre(" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; 
EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_x_id].is_inlinable()); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_x_id].is_inlinable()); EXPECT_EQ(op.assignments.size(), 3 + 2 ); int64_t literal_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp1_id = 4; EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); EXPECT_THAT(op.function_entry_points(), UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1))); } TEST_F(CodegenTest, SmokeWithNonGlobalInputsTest) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("math.add", {expr::CallOp("math.add", {x, x}), WithQTypeAnnotation( Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, false)); EXPECT_THAT(op.headers, ElementsAre( "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_FALSE(op.assignments[input_x_id].is_inlinable()); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_y_id].is_inlinable()); ASSERT_EQ(op.assignments.size(), 2 + 2 ); int64_t tmp0_id = 1; ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, input_x_id}})); int64_t tmp1_id = 3; EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); EXPECT_THAT(op.function_entry_points(), UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1))); } TEST_F(CodegenTest, SmokeWithStatusOrTest) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto floor_div, expr::CallOp("math.floordiv", {x, y})); ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {floor_div, y})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre( "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 2 + 2 ); int64_t tmp0_id = 2; ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = true, .code = "::arolla::FloorDivOp{}", .argument_ids = {input_x_id, input_y_id}})); int64_t tmp1_id = 3; ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); } TEST_F(CodegenTest, SmokeWithContextTest) { ASSERT_OK_AND_ASSIGN( auto x, WithQTypeAnnotation(Leaf("x"), GetDenseArrayQType<float>())); ASSERT_OK_AND_ASSIGN( auto
y, WithQTypeAnnotation(Leaf("y"), GetDenseArrayQType<float>())); ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre( "arolla/" "dense_array/qtype/types.h", "arolla/" "qexpr/operators/dense_array/lifter.h", "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre( " "arolla/" "dense_array/qtype", " "arolla/" "qexpr/operators/dense_array:lib", " "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 1 + 2 ); int64_t tmp0_id = 2; ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionWithContextCall, .operator_returns_status_or = true, .code = "::arolla::DenseArrayLifter<::arolla::AddOp, " "::arolla::meta::type_list<float, float>, " "true>{}", .argument_ids = {input_x_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp0_id); } TEST_F(CodegenTest, SmokeTestWithExport) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp( "math.add", {WithExportAnnotation( expr::CallOp("math.add", {WithQTypeAnnotation(Leaf("x"), GetQType<float>()), Literal(1.f)}), "output"), WithQTypeAnnotation(Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre( "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre(" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 4 + 2 ); int64_t literal_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); 
EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp1_id = MinUnused({input_x_id, input_y_id, literal_id, tmp0_id}); ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "0", .argument_ids = {tmp0_id}})); int64_t tmp2_id = 5; EXPECT_THAT(op.assignments[tmp2_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp2_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp1_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp2_id); EXPECT_THAT(op.side_outputs, ElementsAre(Pair("output", tmp1_id))); } TEST_F(CodegenTest, SmokeTestWithDerivedQTypeDowncast) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("derived_qtype.downcast", {Literal(GetWeakFloatQType()), WithQTypeAnnotation(Leaf("x"), GetQType<double>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "double", .is_entire_expr_status_or = false, .qtype = GetQType<double>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 1 + 1 ); int64_t tmp0_id = MinUnused({input_x_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "double", .is_entire_expr_status_or = false, .qtype = GetQType<double>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFirst, .operator_returns_status_or = false, .code = "", .argument_ids = {input_x_id}})); EXPECT_EQ(op.output_id, tmp0_id); } TEST_F(CodegenTest, SmokeTestWithExportUnusedForMainOutput) { ASSERT_OK_AND_ASSIGN( auto get_first_op, expr::MakeLambdaOperator(expr::ExprOperatorSignature({{"x"}, {"y"}}), expr::Placeholder("x"))); ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp( get_first_op, {WithExportAnnotation( WithQTypeAnnotation(Leaf("y"), GetQType<float>()), "named_main_output"), WithExportAnnotation( expr::CallOp("math.add", {WithQTypeAnnotation(Leaf("x"), GetQType<float>()), Literal(1.f)}), "output")})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre( 
"arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre(" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 5 + 2 ); int64_t tmp0_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "0", .argument_ids = {input_y_id}})); int64_t literal_id = MinUnused({input_x_id, input_y_id, tmp0_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp1_id = MinUnused({input_x_id, input_y_id, literal_id, tmp0_id}); ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp2_id = 5; EXPECT_THAT(op.assignments[tmp2_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp2_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "1", .argument_ids = {tmp1_id}})); int64_t tmp3_id = 6; EXPECT_THAT(op.assignments[tmp3_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp3_id].rvalue(), Eq(RValue{.kind = RValueKind::kFirst, .operator_returns_status_or = false, .code = "", .argument_ids = {tmp0_id, tmp2_id}})); EXPECT_EQ(op.output_id, tmp3_id); EXPECT_THAT(op.side_outputs, ElementsAre(Pair("named_main_output", tmp0_id), Pair("output", tmp2_id))); } TEST_F(CodegenTest, LambdaAndFunctionSinityTest) { auto lx = WithQTypeAnnotation(Leaf("x"), GetQType<float>()); auto ly = WithQTypeAnnotation(Leaf("y"), GetQType<float>()); auto x = expr::CallOp("math.add", {lx, ly}); auto y = expr::CallOp("math.subtract", {lx, ly}); auto a = expr::CallOp("math.add", {x, y}); auto b = expr::CallOp("math.subtract", {x, y}); constexpr int64_t kChainLength = 500; for 
(int i = 0; i != kChainLength; ++i) { auto na = expr::CallOp("math.mod", {a, x}); x = a; a = na; auto nb = expr::CallOp("math.mod", {b, y}); y = b; b = nb; } ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {a, b})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.functions.size(), Eq(3)); for (int i = 0; i != 2; ++i) { EXPECT_THAT(op.functions[i].assignment_ids, IsEmpty()) << i; } EXPECT_THAT(op.functions[2].assignment_ids.size(), Eq(4)); EXPECT_THAT(op.lambdas.size(), Eq(2)); EXPECT_THAT(op.lambdas[0].assignment_ids.size(), Eq(kChainLength - 1)); EXPECT_THAT(op.lambdas[1].assignment_ids.size(), Eq(kChainLength - 1)); } } }
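The assertions above rely on a MinUnused helper that is defined elsewhere in this file and not shown in this excerpt. What follows is a minimal sketch of its presumed behavior (an assumption, not the original definition): the smallest non-negative id absent from the given list of already-used assignment ids.

// Hedged sketch only; MinUnusedSketch is an illustrative name, not the
// original helper.
#include <cstdint>
#include <initializer_list>
#include <set>

int64_t MinUnusedSketch(std::initializer_list<int64_t> used_ids) {
  std::set<int64_t> used(used_ids);
  int64_t id = 0;
  while (used.count(id) > 0) ++id;  // advance past every taken id
  return id;
}
// e.g. MinUnusedSketch({0, 1}) == 2 and MinUnusedSketch({0, 2}) == 1.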
absl::StatusOr<std::vector<QTypePtr>> DependencyTypes( const ExprNodePtr& node, std::function<absl::StatusOr<QTypePtr>(const ExprNodePtr&)> qtype_from_expr_fn) { std::vector<QTypePtr> result; result.reserve(node->node_deps().size()); for (const ExprNodePtr& dep : node->node_deps()) { ASSIGN_OR_RETURN(result.emplace_back(), qtype_from_expr_fn(dep)); } return result; }
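A hedged usage sketch for the DependencyTypes helper above (ExampleCall is illustrative and assumes arolla's ExprNode::qtype() accessor is available): the callback maps each child node to its QType, and ASSIGN_OR_RETURN inside the helper propagates the first lookup failure.

// Illustrative only: collect the dependency qtypes of a node whose children
// already carry resolved qtypes; fail fast on an unresolved child.
absl::StatusOr<std::vector<QTypePtr>> ExampleCall(const ExprNodePtr& node) {
  return DependencyTypes(
      node, [](const ExprNodePtr& dep) -> absl::StatusOr<QTypePtr> {
        if (dep->qtype() == nullptr) {
          return absl::FailedPreconditionError("dependency qtype is unknown");
        }
        return dep->qtype();
      });
}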
TEST_F(CodegenTest, SmokeTest) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("math.add", {expr::CallOp("math.add", {WithQTypeAnnotation( Leaf("x"), GetQType<float>()), Literal(1.f)}), WithQTypeAnnotation(Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_x_id].is_inlinable()); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_y_id].is_inlinable()); EXPECT_EQ(op.assignments.size(), 3 + 2); int64_t literal_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp1_id = 4; EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); EXPECT_THAT(op.function_entry_points(), UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1))); } TEST_F(CodegenTest, SmokeWithNonGlobalInputsTest) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("math.add", {expr::CallOp("math.add", {x, x}), WithQTypeAnnotation( Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, false)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_FALSE(op.assignments[input_x_id].is_inlinable()); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_TRUE(op.assignments[input_y_id].is_inlinable()); ASSERT_EQ(op.assignments.size(), 2 + 2); int64_t tmp0_id = 1; ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, input_x_id}})); int64_t tmp1_id = 3; EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); EXPECT_THAT(op.function_entry_points(), UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1))); } TEST_F(CodegenTest, SmokeWithStatusOrTest) { ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto floor_div, expr::CallOp("math.floordiv", {x, y})); ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {floor_div, y})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 2 + 2); int64_t tmp0_id = 2; ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = true, .code = "::arolla::FloorDivOp{}", .argument_ids = {input_x_id, input_y_id}})); int64_t tmp1_id = 3; ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp0_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp1_id); } TEST_F(CodegenTest, SmokeWithContextTest) { ASSERT_OK_AND_ASSIGN( auto x, WithQTypeAnnotation(Leaf("x"), GetDenseArrayQType<float>())); ASSERT_OK_AND_ASSIGN( auto y, WithQTypeAnnotation(Leaf("y"), GetDenseArrayQType<float>())); ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "dense_array/qtype/types.h", "arolla/" "qexpr/operators/dense_array/lifter.h", "arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "dense_array/qtype", "//" "arolla/" "qexpr/operators/dense_array:lib", "//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = false, .is_local_expr_status_or = false, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 1 + 2); int64_t tmp0_id = 2; ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "::arolla::DenseArray<float>", .is_entire_expr_status_or = true, .is_local_expr_status_or = true, .qtype = GetDenseArrayQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionWithContextCall, .operator_returns_status_or = true, .code = "::arolla::DenseArrayLifter<::arolla::AddOp, " "::arolla::meta::type_list<float, float>, " "true>{}", .argument_ids = {input_x_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp0_id); } TEST_F(CodegenTest, SmokeTestWithExport) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp( "math.add", {WithExportAnnotation( expr::CallOp("math.add", {WithQTypeAnnotation(Leaf("x"), GetQType<float>()), Literal(1.f)}), "output"), WithQTypeAnnotation(Leaf("y"), GetQType<float>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id =
op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 4 + 2 ); int64_t literal_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp1_id = MinUnused({input_x_id, input_y_id, literal_id, tmp0_id}); ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "0", .argument_ids = {tmp0_id}})); int64_t tmp2_id = 5; EXPECT_THAT(op.assignments[tmp2_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp2_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {tmp1_id, input_y_id}})); EXPECT_EQ(op.output_id, tmp2_id); EXPECT_THAT(op.side_outputs, ElementsAre(Pair("output", tmp1_id))); } TEST_F(CodegenTest, SmokeTestWithDerivedQTypeDowncast) { ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp("derived_qtype.downcast", {Literal(GetWeakFloatQType()), WithQTypeAnnotation(Leaf("x"), GetQType<double>())})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "double", .is_entire_expr_status_or = false, .qtype = GetQType<double>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 1 + 1 ); int64_t tmp0_id = MinUnused({input_x_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "double", .is_entire_expr_status_or = false, .qtype = GetQType<double>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kFirst, .operator_returns_status_or = false, .code = "", .argument_ids = {input_x_id}})); EXPECT_EQ(op.output_id, tmp0_id); } 
TEST_F(CodegenTest, SmokeTestWithExportUnusedForMainOutput) { ASSERT_OK_AND_ASSIGN( auto get_first_op, expr::MakeLambdaOperator(expr::ExprOperatorSignature({{"x"}, {"y"}}), expr::Placeholder("x"))); ASSERT_OK_AND_ASSIGN( auto expr, expr::CallOp( get_first_op, {WithExportAnnotation( WithQTypeAnnotation(Leaf("y"), GetQType<float>()), "named_main_output"), WithExportAnnotation( expr::CallOp("math.add", {WithQTypeAnnotation(Leaf("x"), GetQType<float>()), Literal(1.f)}), "output")})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.headers, ElementsAre("arolla/" "qexpr/operators/math/arithmetic.h")); EXPECT_THAT(op.deps, ElementsAre("//" "arolla/" "qexpr/operators/math:lib")); EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _))); int64_t input_x_id = op.inputs["x"]; EXPECT_THAT(op.assignments[input_x_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput())); int64_t input_y_id = op.inputs["y"]; EXPECT_THAT(op.assignments[input_y_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kInput})); EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput())); EXPECT_EQ(op.assignments.size(), 5 + 2); int64_t tmp0_id = MinUnused({input_x_id, input_y_id}); ASSERT_LT(tmp0_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable()) << "used for output, but export is inside of the expression"; EXPECT_THAT(op.assignments[tmp0_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp0_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "0", .argument_ids = {input_y_id}})); int64_t literal_id = MinUnused({input_x_id, input_y_id, tmp0_id}); ASSERT_LT(literal_id, op.assignments.size()); EXPECT_THAT(op.assignments[literal_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLiteral})); EXPECT_THAT(op.assignments[literal_id].rvalue(), Eq(RValue::CreateLiteral("float{1.}"))); int64_t tmp1_id = MinUnused({input_x_id, input_y_id, literal_id, tmp0_id}); ASSERT_LT(tmp1_id, op.assignments.size()); EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable()); EXPECT_THAT(op.assignments[tmp1_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp1_id].rvalue(), Eq(RValue{.kind = RValueKind::kFunctionCall, .operator_returns_status_or = false, .code = "::arolla::AddOp{}", .argument_ids = {input_x_id, literal_id}})); int64_t tmp2_id = 5; EXPECT_THAT(op.assignments[tmp2_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp2_id].rvalue(), Eq(RValue{.kind = RValueKind::kOutput, .operator_returns_status_or = false, .code = "1", .argument_ids = {tmp1_id}})); int64_t tmp3_id = 6; EXPECT_THAT(op.assignments[tmp3_id].lvalue(), Eq(LValue{.type_name = "float", .is_entire_expr_status_or = false, .qtype = GetQType<float>(), .kind = LValueKind::kLocal})); EXPECT_THAT(op.assignments[tmp3_id].rvalue(), Eq(RValue{.kind = RValueKind::kFirst, .operator_returns_status_or =
false, .code = "", .argument_ids = {tmp0_id, tmp2_id}})); EXPECT_EQ(op.output_id, tmp3_id); EXPECT_THAT(op.side_outputs, ElementsAre(Pair("named_main_output", tmp0_id), Pair("output", tmp2_id))); } TEST_F(CodegenTest, LambdaAndFunctionSinityTest) { auto lx = WithQTypeAnnotation(Leaf("x"), GetQType<float>()); auto ly = WithQTypeAnnotation(Leaf("y"), GetQType<float>()); auto x = expr::CallOp("math.add", {lx, ly}); auto y = expr::CallOp("math.subtract", {lx, ly}); auto a = expr::CallOp("math.add", {x, y}); auto b = expr::CallOp("math.subtract", {x, y}); constexpr int64_t kChainLength = 500; for (int i = 0; i != kChainLength; ++i) { auto na = expr::CallOp("math.mod", {a, x}); x = a; a = na; auto nb = expr::CallOp("math.mod", {b, y}); y = b; b = nb; } ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {a, b})); ASSERT_OK_AND_ASSIGN( OperatorCodegenData op, GenerateOperatorCode(expr, true)); EXPECT_THAT(op.functions.size(), Eq(3)); for (int i = 0; i != 2; ++i) { EXPECT_THAT(op.functions[i].assignment_ids, IsEmpty()) << i; } EXPECT_THAT(op.functions[2].assignment_ids.size(), Eq(4)); EXPECT_THAT(op.lambdas.size(), Eq(2)); EXPECT_THAT(op.lambdas[0].assignment_ids.size(), Eq(kChainLength - 1)); EXPECT_THAT(op.lambdas[1].assignment_ids.size(), Eq(kChainLength - 1)); }
#include <string> #include <utility> #include "tensorflow/lite/tools/delegates/delegate_provider.h" #include "tensorflow/lite/tools/evaluation/utils.h" namespace tflite { namespace tools { class XnnpackDelegateProvider : public DelegateProvider { public: XnnpackDelegateProvider() { default_params_.AddParam("use_xnnpack", ToolParam::Create<bool>(false)); default_params_.AddParam("xnnpack_force_fp16", ToolParam::Create<bool>(false)); default_params_.AddParam("xnnpack_experimental_weight_cache_file_path", ToolParam::Create<std::string>("")); } std::vector<Flag> CreateFlags(ToolParams* params) const final; void LogParams(const ToolParams& params, bool verbose) const final; TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final; std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate( const ToolParams& params) const final; std::string GetName() const final { return "XNNPACK"; } }; REGISTER_DELEGATE_PROVIDER(XnnpackDelegateProvider); std::vector<Flag> XnnpackDelegateProvider::CreateFlags( ToolParams* params) const { std::vector<Flag> flags = { CreateFlag<bool>("use_xnnpack", params, "explicitly apply the XNNPACK delegate. Note the " "XNNPACK delegate could " "be implicitly applied by the TF Lite runtime " "regardless the value of " "this parameter. To disable this implicit application, " "set the value to " "false explicitly."), CreateFlag<bool>("xnnpack_force_fp16", params, "enforce float16 inference."), CreateFlag<std::string>("xnnpack_experimental_weight_cache_file_path", params, "enable file-backed weight caching."), }; return flags; } void XnnpackDelegateProvider::LogParams(const ToolParams& params, bool verbose) const { LOG_TOOL_PARAM(params, bool, "use_xnnpack", "Use xnnpack", verbose); LOG_TOOL_PARAM(params, bool, "xnnpack_force_fp16", "xnnpack_force_fp16", verbose); LOG_TOOL_PARAM(params, std::string, "xnnpack_experimental_weight_cache_file_path", "xnnpack_experimental_weight_cache_file_path", verbose); } TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate( const ToolParams& params) const { if (params.Get<bool>("use_xnnpack")) { return evaluation::CreateXNNPACKDelegate( params.Get<int32_t>("num_threads"), params.Get<bool>("xnnpack_force_fp16"), params.Get<std::string>("xnnpack_experimental_weight_cache_file_path") .c_str()); } return CreateNullDelegate(); } std::pair<TfLiteDelegatePtr, int> XnnpackDelegateProvider::CreateRankedTfLiteDelegate( const ToolParams& params) const { auto ptr = CreateTfLiteDelegate(params); return std::make_pair(std::move(ptr), params.GetPosition<bool>("use_xnnpack")); } } }
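A hedged sketch of how a host tool could drive this provider (MakeXnnpackDelegateSketch is illustrative; GetRegisteredDelegateProviders, ToolParams, and the parameter names come from the code above and the test below; providers[0] assumes XNNPACK is the only registered provider, as the test asserts, and "num_threads" must be supplied by the host tool because this provider reads it without defining it):

#include <cstdint>
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"

tflite::tools::TfLiteDelegatePtr MakeXnnpackDelegateSketch() {
  using tflite::tools::GetRegisteredDelegateProviders;
  using tflite::tools::ToolParam;
  using tflite::tools::ToolParams;
  const auto& providers = GetRegisteredDelegateProviders();
  ToolParams params;
  params.Merge(providers[0]->DefaultParams());
  // Not defined by this provider; CreateTfLiteDelegate reads it.
  params.AddParam("num_threads", ToolParam::Create<int32_t>(-1));
  params.Set<bool>("use_xnnpack", true, 0);
  // Returns a null delegate when "use_xnnpack" is false.
  return providers[0]->CreateTfLiteDelegate(params);
}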
#include <cstdint> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" #include "tensorflow/lite/tools/delegates/delegate_provider.h" #include "tensorflow/lite/tools/tool_params.h" namespace tflite { namespace tools { namespace { TEST(XNNPackDelegateProviderTest, Test) { const std::string kFakeCacheParam = testing::TempDir() + "/XNNPackDelegateProviderTest.xnnpack_cache"; const auto& providers = GetRegisteredDelegateProviders(); ASSERT_EQ(providers.size(), 1); ToolParams params; const auto& xnnpack_provider = providers[0]; ASSERT_NE(xnnpack_provider, nullptr); params.Merge(xnnpack_provider->DefaultParams()); params.AddParam("num_threads", ToolParam::Create<int32_t>(-1)); EXPECT_TRUE(params.HasParam("use_xnnpack")); EXPECT_FALSE(params.HasValueSet<bool>("use_xnnpack")); ASSERT_NE(params.GetParam("use_xnnpack"), nullptr); EXPECT_TRUE(params.HasParam("xnnpack_force_fp16")); EXPECT_FALSE(params.HasValueSet<bool>("xnnpack_force_fp16")); ASSERT_NE(params.GetParam("xnnpack_force_fp16"), nullptr); EXPECT_TRUE(params.HasParam("xnnpack_experimental_weight_cache_file_path")); EXPECT_FALSE(params.HasValueSet<std::string>( "xnnpack_experimental_weight_cache_file_path")); ASSERT_NE(params.GetParam("xnnpack_experimental_weight_cache_file_path"), nullptr); params.Set<bool>("use_xnnpack", true, 0); { TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); const TfLiteXNNPackDelegateOptions* options = TfLiteXNNPackDelegateGetOptions(delegate.get()); ASSERT_NE(options, nullptr); EXPECT_EQ(options->experimental_weight_cache_file_path, nullptr); } params.Set<bool>("xnnpack_force_fp16", true, 1); params.Set<std::string>("xnnpack_experimental_weight_cache_file_path", kFakeCacheParam, 2); { TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); const TfLiteXNNPackDelegateOptions* options = TfLiteXNNPackDelegateGetOptions(delegate.get()); ASSERT_NE(options, nullptr); EXPECT_THAT(options->experimental_weight_cache_file_path, testing::StrEq(kFakeCacheParam)); EXPECT_TRUE(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16); } } } } }
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final; std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate( const ToolParams& params) const final; std::string GetName() const final { return "XNNPACK"; }
#include <cstdint> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" #include "tensorflow/lite/tools/delegates/delegate_provider.h" #include "tensorflow/lite/tools/tool_params.h" namespace tflite { namespace tools { namespace { TEST(XNNPackDelegateProviderTest, Test) { const std::string kFakeCacheParam = testing::TempDir() + "/XNNPackDelegateProviderTest.xnnpack_cache"; const auto& providers = GetRegisteredDelegateProviders(); ASSERT_EQ(providers.size(), 1); ToolParams params; const auto& xnnpack_provider = providers[0]; ASSERT_NE(xnnpack_provider, nullptr); params.Merge(xnnpack_provider->DefaultParams()); params.AddParam("num_threads", ToolParam::Create<int32_t>(-1)); EXPECT_TRUE(params.HasParam("use_xnnpack")); EXPECT_FALSE(params.HasValueSet<bool>("use_xnnpack")); ASSERT_NE(params.GetParam("use_xnnpack"), nullptr); EXPECT_TRUE(params.HasParam("xnnpack_force_fp16")); EXPECT_FALSE(params.HasValueSet<bool>("xnnpack_force_fp16")); ASSERT_NE(params.GetParam("xnnpack_force_fp16"), nullptr); EXPECT_TRUE(params.HasParam("xnnpack_experimental_weight_cache_file_path")); EXPECT_FALSE(params.HasValueSet<std::string>( "xnnpack_experimental_weight_cache_file_path")); ASSERT_NE(params.GetParam("xnnpack_experimental_weight_cache_file_path"), nullptr); params.Set<bool>("use_xnnpack", true, 0); { TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); const TfLiteXNNPackDelegateOptions* options = TfLiteXNNPackDelegateGetOptions(delegate.get()); ASSERT_NE(options, nullptr); EXPECT_EQ(options->experimental_weight_cache_file_path, nullptr); } params.Set<bool>("xnnpack_force_fp16", true, 1); params.Set<std::string>("xnnpack_experimental_weight_cache_file_path", kFakeCacheParam, 2); { TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); const TfLiteXNNPackDelegateOptions* options = TfLiteXNNPackDelegateGetOptions(delegate.get()); ASSERT_NE(options, nullptr); EXPECT_THAT(options->experimental_weight_cache_file_path, testing::StrEq(kFakeCacheParam)); EXPECT_TRUE(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16); } }
#include "xla/status_macros.h" #include <algorithm> #include <string> #include "absl/base/attributes.h" #include "absl/base/optimization.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "tsl/platform/logging.h" #include "tsl/platform/stacktrace.h" #include "tsl/platform/status.h" namespace xla { namespace status_macros { ABSL_CONST_INIT const char kPossibleAutoJitAlternative[] = "This error might be occurring with the use of xla.compile. If it is not " "necessary that every Op be compiled with XLA, an alternative is to use " "auto_jit with OptimizerOptions.global_jit_level = ON_2 or the environment " "variable TF_XLA_FLAGS=\"tf_xla_auto_jit=2\" which will attempt to use xla " "to compile as much of the graph as the compiler is able to."; static void LogError(const absl::Status& status, const char* filename, int line, int log_severity, bool should_log_stack_trace) { if (ABSL_PREDICT_TRUE(log_severity != tsl::NUM_SEVERITIES)) { std::string stack_trace; if (should_log_stack_trace) { stack_trace = absl::StrCat("\n", tsl::CurrentStackTrace()); } switch (log_severity) { case tsl::INFO: LOG(INFO) << status << stack_trace; break; case tsl::WARNING: LOG(WARNING) << status << stack_trace; break; case tsl::ERROR: LOG(ERROR) << status << stack_trace; break; case tsl::FATAL: LOG(FATAL) << status << stack_trace; break; case tsl::NUM_SEVERITIES: break; default: LOG(FATAL) << "Unknown LOG severity " << log_severity; } } } static absl::Status MakeError(const char* filename, int line, absl::StatusCode code, const std::string& message, bool should_log, int log_severity, bool should_log_stack_trace) { if (ABSL_PREDICT_FALSE(code == absl::StatusCode::kOk)) { LOG(ERROR) << "Cannot create error with status OK"; code = absl::StatusCode::kUnknown; } const absl::Status status = absl::Status(code, message); if (ABSL_PREDICT_TRUE(should_log)) { LogError(status, filename, line, log_severity, should_log_stack_trace); } return status; } MakeErrorStream::MakeErrorStreamWithOutput& MakeErrorStream::add_ret_check_failure(const char* condition) { return *this << "RET_CHECK failure (" << impl_->file_ << ":" << impl_->line_ << ") " << condition << " "; } void MakeErrorStream::CheckNotDone() const { impl_->CheckNotDone(); } MakeErrorStream::Impl::Impl(const char* file, int line, tsl::error::Code code, MakeErrorStream* error_stream, bool is_logged_by_default) : file_(file), line_(line), code_(static_cast<absl::StatusCode>(code)), is_done_(false), should_log_(is_logged_by_default), log_severity_(tsl::ERROR), should_log_stack_trace_(false), make_error_stream_with_output_wrapper_(error_stream) {} MakeErrorStream::Impl::Impl(const absl::Status& status, PriorMessageHandling prior_message_handling, const char* file, int line, MakeErrorStream* error_stream) : file_(file), line_(line), code_(!status.ok() ? 
static_cast<absl::StatusCode>(status.code()) : absl::StatusCode::kUnknown), prior_message_handling_(prior_message_handling), prior_message_(status.message()), is_done_(false), should_log_(true), log_severity_(tsl::ERROR), should_log_stack_trace_(false), make_error_stream_with_output_wrapper_(error_stream) { DCHECK(!status.ok()) << "Attempted to append/prepend error text to status OK"; } MakeErrorStream::Impl::~Impl() { if (!is_done_) { LOG(ERROR) << "MakeErrorStream destructed without getting absl::Status: " << file_ << ":" << line_ << " " << stream_.str(); } } absl::Status MakeErrorStream::Impl::GetStatus() { if (is_done_) { LOG(ERROR) << "MakeErrorStream got absl::Status more than once: " << file_ << ":" << line_ << " " << stream_.str(); } is_done_ = true; const std::string& stream_str = stream_.str(); const std::string str = prior_message_handling_ == kAppendToPriorMessage ? absl::StrCat(prior_message_, stream_str) : absl::StrCat(stream_str, prior_message_); if (ABSL_PREDICT_FALSE(str.empty())) { return MakeError( file_, line_, code_, absl::StrCat(str, "Error without message at ", file_, ":", line_), true , tsl::ERROR , should_log_stack_trace_); } else { return MakeError(file_, line_, code_, str, should_log_, log_severity_, should_log_stack_trace_); } } void MakeErrorStream::Impl::CheckNotDone() const { if (is_done_) { LOG(ERROR) << "MakeErrorStream shift called after getting absl::Status: " << file_ << ":" << line_ << " " << stream_.str(); } } } }
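Grounded in add_ret_check_failure and MakeError above, a hedged sketch of the failure path these macros produce (CheckRank is illustrative; the test file below confirms the INTERNAL error code and the message prefix):

#include "absl/status/status.h"
#include "xla/status_macros.h"

absl::Status CheckRank(int rank) {
  // On failure this returns an INTERNAL status whose message begins
  // "RET_CHECK failure (<file>:<line>) rank >= 0 " followed by our text.
  TF_RET_CHECK(rank >= 0) << "rank must be non-negative, got " << rank;
  return absl::OkStatus();
}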
#include "xla/status_macros.h" #include <functional> #include <utility> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { absl::Status RetCheckFail() { TF_RET_CHECK(2 > 3); return absl::OkStatus(); } absl::Status RetCheckFailWithExtraMessage() { TF_RET_CHECK(2 > 3) << "extra message"; return absl::OkStatus(); } absl::Status RetCheckSuccess() { TF_RET_CHECK(3 > 2); return absl::OkStatus(); } TEST(StatusMacros, RetCheckFailing) { absl::Status status = RetCheckFail(); EXPECT_EQ(status.code(), tsl::error::INTERNAL); EXPECT_THAT(status.message(), ::testing::ContainsRegex("RET_CHECK failure.*2 > 3")); } TEST(StatusMacros, RetCheckFailingWithExtraMessage) { absl::Status status = RetCheckFailWithExtraMessage(); EXPECT_EQ(status.code(), tsl::error::INTERNAL); EXPECT_THAT(status.message(), ::testing::ContainsRegex("RET_CHECK.*2 > 3 extra message")); } TEST(StatusMacros, RetCheckSucceeding) { absl::Status status = RetCheckSuccess(); EXPECT_IS_OK(status); } absl::StatusOr<int> CreateIntSuccessfully() { return 42; } absl::StatusOr<int> CreateIntUnsuccessfully() { return tsl::errors::Internal("foobar"); } TEST(StatusMacros, AssignOrAssertOnOK) { TF_ASSERT_OK_AND_ASSIGN(int result, CreateIntSuccessfully()); EXPECT_EQ(42, result); } absl::Status ReturnStatusOK() { return absl::OkStatus(); } absl::Status ReturnStatusError() { return (tsl::errors::Internal("foobar")); } using StatusReturningFunction = std::function<absl::Status()>; absl::StatusOr<int> CallStatusReturningFunction( const StatusReturningFunction& func) { TF_RETURN_IF_ERROR(func()); return 42; } TEST(StatusMacros, ReturnIfErrorOnOK) { absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusOK); EXPECT_IS_OK(rc); EXPECT_EQ(42, std::move(rc).value()); } TEST(StatusMacros, ReturnIfErrorOnError) { absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusError); EXPECT_FALSE(rc.ok()); EXPECT_EQ(rc.status().code(), tsl::error::INTERNAL); } TEST(StatusMacros, AssignOrReturnSuccessfully) { absl::Status status = []() { TF_ASSIGN_OR_RETURN(int value, CreateIntSuccessfully()); EXPECT_EQ(value, 42); return absl::OkStatus(); }(); EXPECT_IS_OK(status); } TEST(StatusMacros, AssignOrReturnUnsuccessfully) { absl::Status status = []() { TF_ASSIGN_OR_RETURN(int value, CreateIntUnsuccessfully()); (void)value; return absl::OkStatus(); }(); EXPECT_FALSE(status.ok()); EXPECT_EQ(status.code(), tsl::error::INTERNAL); } }
absl::Status MakeErrorStream::Impl::GetStatus() { if (is_done_) { LOG(ERROR) << "MakeErrorStream got absl::Status more than once: " << file_ << ":" << line_ << " " << stream_.str(); } is_done_ = true; const std::string& stream_str = stream_.str(); const std::string str = prior_message_handling_ == kAppendToPriorMessage ? absl::StrCat(prior_message_, stream_str) : absl::StrCat(stream_str, prior_message_); if (ABSL_PREDICT_FALSE(str.empty())) { return MakeError( file_, line_, code_, absl::StrCat(str, "Error without message at ", file_, ":", line_), true , tsl::ERROR , should_log_stack_trace_); } else { return MakeError(file_, line_, code_, str, should_log_, log_severity_, should_log_stack_trace_); } }
TEST(StatusMacros, AssignOrAssertOnOK) { TF_ASSERT_OK_AND_ASSIGN(int result, CreateIntSuccessfully()); EXPECT_EQ(42, result); } TEST(StatusMacros, ReturnIfErrorOnOK) { absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusOK); EXPECT_IS_OK(rc); EXPECT_EQ(42, std::move(rc).value()); } TEST(StatusMacros, ReturnIfErrorOnError) { absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusError); EXPECT_FALSE(rc.ok()); EXPECT_EQ(rc.status().code(), tsl::error::INTERNAL); } TEST(StatusMacros, AssignOrReturnSuccessfully) { absl::Status status = []() { TF_ASSIGN_OR_RETURN(int value, CreateIntSuccessfully()); EXPECT_EQ(value, 42); return absl::OkStatus(); }(); EXPECT_IS_OK(status); } TEST(StatusMacros, AssignOrReturnUnsuccessfully) { absl::Status status = []() { TF_ASSIGN_OR_RETURN(int value, CreateIntUnsuccessfully()); (void)value; return absl::OkStatus(); }(); EXPECT_FALSE(status.ok()); EXPECT_EQ(status.code(), tsl::error::INTERNAL); }
#include "tensorflow/core/runtime_fallback/util/attr_util.h" #include <algorithm> #include <cstdlib> #include <cstring> #include <string> #include <utility> #include <vector> #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/tfrt/utils/tensor_util.h" #include "tfrt/core_runtime/op_attrs.h" #include "tfrt/host_context/attribute_utils.h" #include "tfrt/support/error_util.h" #include "tfrt/support/forward_decls.h" #include "tfrt/support/logging.h" #include "tfrt/tensor/dense_host_tensor.h" #include "tfrt/tensor/tensor_serialize_utils.h" namespace tensorflow { namespace tfd { namespace { using ::tensorflow::protobuf::RepeatedFieldBackInserter; using ::tfrt::AggregateAttr; using ::tfrt::BEFAttributeType; using ::tfrt::DenseAttr; using ::tfrt::DenseHostTensor; using ::tfrt::HostContext; using ::tfrt::OpAttrsRawEntry; using ::tfrt::OpAttrsRef; using ::tfrt::OpAttrType; using ::tfrt::string_view; llvm::Expected<tensorflow::Tensor> DecodeDenseAttrToTfTensor( const DenseAttr& dense_attr, HostContext* host) { llvm::Expected<DenseHostTensor> dht = tfrt::DeserializeDenseHostTensorFromDenseAttr(dense_attr, host); if (!dht) { return tfrt::MakeStringError( "Cannot create DenseHostTensor in DecodeDenseAttrToTensorInterface: ", dht.takeError()); } return tfrt::TFRTTensorToTFTensor(*dht); } llvm::Error FillAttrValueMapUsingArray(const OpAttrsRawEntry& entry, AttrValue& attr_tmp, const OpAttrsRef& attrs) { attr_tmp.mutable_list()->Clear(); if (entry.element_count == 0) { if (entry.type == OpAttrType::CHAR) { attr_tmp.set_s(""); } return llvm::Error::success(); } switch (entry.type) { case OpAttrType::CHAR: { string_view attr_value = attrs.GetStringAsserting(entry.name); attr_tmp.set_s(attr_value.data(), attr_value.size()); return llvm::Error::success(); } case OpAttrType::FUNC: { string_view attr_value = attrs.GetFuncNameAsserting(entry.name); attr_tmp.mutable_func()->set_name(attr_value.data(), attr_value.size()); return llvm::Error::success(); } case OpAttrType::I64: { llvm::ArrayRef<int64_t> int_array = attrs.GetArrayAsserting<int64_t>(entry.name); auto* mutable_i = attr_tmp.mutable_list()->mutable_i(); std::copy(int_array.begin(), int_array.end(), RepeatedFieldBackInserter(mutable_i)); return llvm::Error::success(); } case OpAttrType::F32: { llvm::ArrayRef<float> float_array = attrs.GetArrayAsserting<float>(entry.name); auto* mutable_f = attr_tmp.mutable_list()->mutable_f(); std::copy(float_array.begin(), float_array.end(), RepeatedFieldBackInserter(mutable_f)); return llvm::Error::success(); } case OpAttrType::BOOL: { llvm::ArrayRef<bool> bool_array = attrs.GetArrayAsserting<bool>(entry.name); auto mutable_b = attr_tmp.mutable_list()->mutable_b(); std::copy(bool_array.begin(), bool_array.end(), RepeatedFieldBackInserter(mutable_b)); return llvm::Error::success(); } case OpAttrType::DTYPE: { const auto& op_attr = attrs.GetRawAsserting(entry.name); assert(op_attr.IsArray()); auto bef_dtypes = llvm::ArrayRef(static_cast<const tfrt::DType*>(op_attr.GetData()), op_attr.element_count); llvm::SmallVector<tensorflow::DataType, 4> tf_dtypes; 
tf_dtypes.reserve(bef_dtypes.size()); for (auto bef_dtype : bef_dtypes) { tf_dtypes.push_back(ConvertBefAttrTypeToTfDataType(bef_dtype)); } auto* mutable_type = attr_tmp.mutable_list()->mutable_type(); std::copy(tf_dtypes.begin(), tf_dtypes.end(), RepeatedFieldBackInserter(mutable_type)); return llvm::Error::success(); } default: return tfrt::MakeStringError("unsupported array attribute type"); } } llvm::Error FillAttrValueMapUsingAggregate(const OpAttrsRawEntry& entry, AttrValue& attr_tmp, const OpAttrsRef& attrs) { AggregateAttr list_attr = attrs.GetAsserting<AggregateAttr>(entry.name); int num_values = list_attr.GetNumElements(); if (num_values == 0) { attr_tmp.mutable_list(); return llvm::Error::success(); } auto attr_base = list_attr.GetAttribute(0); auto* mutable_list = attr_tmp.mutable_list(); mutable_list->Clear(); if (IsDataTypeAttribute(attr_base.type()) && GetDataType(attr_base.type()) == tfrt::DType::String) { auto* mutable_s = mutable_list->mutable_s(); mutable_s->Reserve(num_values); for (int i = 0; i < num_values; ++i) { auto string_attr = list_attr.GetAttributeOfType<tfrt::StringAttr>(i); mutable_list->add_s(string_attr.GetValue().data(), string_attr.GetValue().size()); } } else if (attr_base.type() == BEFAttributeType::kFunc) { auto* mutable_f = mutable_list->mutable_func(); mutable_f->Reserve(num_values); for (int i = 0; i < num_values; ++i) { auto func_attr = list_attr.GetAttributeOfType<tfrt::FuncAttr>(i); auto mutable_func = mutable_list->add_func(); mutable_func->set_name(func_attr.GetFunctionName().str()); } } else if (attr_base.type() == BEFAttributeType::kShape) { auto* mutable_list = attr_tmp.mutable_list(); auto* mutable_shape = mutable_list->mutable_shape(); mutable_shape->Reserve(num_values); for (int i = 0; i < num_values; ++i) { auto shape_attr = list_attr.GetAttributeOfType<tfrt::ShapeAttr>(i); auto* added_shape = mutable_list->add_shape(); if (shape_attr.HasRank()) { int rank = shape_attr.GetRank(); auto shape = shape_attr.GetShape(); added_shape->mutable_dim()->Reserve(rank); for (int d = 0; d < rank; ++d) { added_shape->add_dim()->set_size(shape[d]); } } else { added_shape->set_unknown_rank(true); } } } else { return tfrt::MakeStringError("unsupported list attribute type"); } return llvm::Error::success(); } llvm::Error FillAttrValueMapUsingScalar(const OpAttrsRawEntry& entry, AttrValue& attr_tmp, HostContext* host, const OpAttrsRef& attrs) { switch (entry.type) { case OpAttrType::I64: { int64_t attr_value = attrs.GetAsserting<int64_t>(entry.name); attr_tmp.set_i(attr_value); return llvm::Error::success(); } case OpAttrType::F32: { float attr_value = attrs.GetAsserting<float>(entry.name); attr_tmp.set_f(attr_value); return llvm::Error::success(); } case OpAttrType::BOOL: { bool attr_value = attrs.GetAsserting<bool>(entry.name); attr_tmp.set_b(attr_value); return llvm::Error::success(); } case OpAttrType::DTYPE: { OpAttrType op_attr_type = attrs.GetAsserting<OpAttrType>(entry.name); DataType tf_dtype = ConvertToTfDataType(op_attr_type); attr_tmp.set_type(tf_dtype); return llvm::Error::success(); } case OpAttrType::SHAPE: { auto shape_attr = attrs.GetAsserting<tfrt::ShapeAttr>(entry.name); auto* mutable_shape = attr_tmp.mutable_shape(); if (shape_attr.HasRank()) { int rank = shape_attr.GetRank(); auto shape = shape_attr.GetShape(); mutable_shape->mutable_dim()->Reserve(rank); for (int d = 0; d < rank; ++d) { mutable_shape->add_dim()->set_size(shape[d]); } } else { mutable_shape->set_unknown_rank(true); } return llvm::Error::success(); } case 
OpAttrType::DENSE: { auto dense_attr = attrs.GetAsserting<tfrt::DenseAttr>(entry.name); llvm::Expected<tensorflow::Tensor> tf_tensor = DecodeDenseAttrToTfTensor(dense_attr, host); if (!tf_tensor) return tf_tensor.takeError(); auto* mutable_tensor = attr_tmp.mutable_tensor(); if (tf_tensor->NumElements() > 1) { tf_tensor->AsProtoTensorContent(mutable_tensor); } else { tf_tensor->AsProtoField(mutable_tensor); } return llvm::Error::success(); } case OpAttrType::AGGREGATE: { return FillAttrValueMapUsingAggregate(entry, attr_tmp, attrs); } default: LOG(ERROR) << "failure case"; return tfrt::MakeStringError("unsupported scalar attribute type"); } } } Status ParseTfDataType(absl::string_view dtype, DataType* data_type) { if (dtype == "DT_INT8") { *data_type = DataType::DT_INT8; return absl::OkStatus(); } else if (dtype == "DT_INT32") { *data_type = DataType::DT_INT32; return absl::OkStatus(); } else if (dtype == "DT_INT64") { *data_type = DataType::DT_INT64; return absl::OkStatus(); } else if (dtype == "DT_HALF") { *data_type = DataType::DT_HALF; return absl::OkStatus(); } else if (dtype == "DT_FLOAT") { *data_type = DataType::DT_FLOAT; return absl::OkStatus(); } else if (dtype == "DT_DOUBLE") { *data_type = DataType::DT_DOUBLE; return absl::OkStatus(); } else { return errors::InvalidArgument("Unsupported dtype, ", std::string(dtype), " in ParseTfDataType."); } } DataType ConvertToTfDataType(tfrt::OpAttrType op_attr_type) { switch (op_attr_type) { #define OP_ATTR_TYPE(TFRT_ENUM, DT_ENUM) \ case tfrt::OpAttrType::TFRT_ENUM: \ return DataType::DT_ENUM; #include "tensorflow/core/runtime_fallback/util/attr_type.def" default: TFRT_DLOG(ERROR) << "unsupported dtype" << static_cast<int>(op_attr_type) << " in TFRT fallback kernel."; abort(); } } tfrt::OpAttrType ConvertFromTfDataType(DataType data_type) { switch (data_type) { #define OP_ATTR_TYPE(TFRT_ENUM, DT_ENUM) \ case DataType::DT_ENUM: \ return tfrt::OpAttrType::TFRT_ENUM; #include "tensorflow/core/runtime_fallback/util/attr_type.def" default: TFRT_DLOG(ERROR) << "unsupported dtype " << static_cast<int>(data_type) << "in TFRT fallback kernel."; abort(); } } DataType ConvertBefAttrTypeToTfDataType(tfrt::DType attr_type) { switch (attr_type) { case tfrt::DType::I1: return DataType::DT_BOOL; case tfrt::DType::I8: return DataType::DT_INT8; case tfrt::DType::I16: return DataType::DT_INT16; case tfrt::DType::I32: return DataType::DT_INT32; case tfrt::DType::I64: return DataType::DT_INT64; case tfrt::DType::UI8: return DataType::DT_UINT8; case tfrt::DType::UI16: return DataType::DT_UINT16; case tfrt::DType::UI32: return DataType::DT_UINT32; case tfrt::DType::UI64: return DataType::DT_UINT64; case tfrt::DType::F16: return DataType::DT_HALF; case tfrt::DType::BF16: return DataType::DT_BFLOAT16; case tfrt::DType::F32: return DataType::DT_FLOAT; case tfrt::DType::F64: return DataType::DT_DOUBLE; case tfrt::DType::Complex64: return DataType::DT_COMPLEX64; case tfrt::DType::Complex128: return DataType::DT_COMPLEX128; case tfrt::DType::String: return DataType::DT_STRING; case tfrt::DType::Resource: return DataType::DT_RESOURCE; case tfrt::DType::Variant: return DataType::DT_VARIANT; case tfrt::DType::QUI8: return DataType::DT_QUINT8; case tfrt::DType::QUI16: return DataType::DT_QUINT16; case tfrt::DType::QI8: return DataType::DT_QINT8; case tfrt::DType::QI16: return DataType::DT_QINT16; case tfrt::DType::QI32: return DataType::DT_QINT32; default: TFRT_DLOG(ERROR) << "unsupported tfrt::DType" << static_cast<int>(attr_type) << " in TFRT fallback kernel."; abort(); 
} } tfrt::DType ConvertTfDataTypeToBefAttrType(DataType data_type) { switch (data_type) { case DataType::DT_UINT8: return tfrt::DType::UI8; case DataType::DT_UINT16: return tfrt::DType::UI16; case DataType::DT_UINT32: return tfrt::DType::UI32; case DataType::DT_UINT64: return tfrt::DType::UI64; case DataType::DT_BOOL: return tfrt::DType::I1; case DataType::DT_INT8: return tfrt::DType::I8; case DataType::DT_INT16: return tfrt::DType::I16; case DataType::DT_INT32: return tfrt::DType::I32; case DataType::DT_INT64: return tfrt::DType::I64; case DataType::DT_HALF: return tfrt::DType::F16; case DataType::DT_BFLOAT16: return tfrt::DType::BF16; case DataType::DT_FLOAT: return tfrt::DType::F32; case DataType::DT_DOUBLE: return tfrt::DType::F64; case DataType::DT_COMPLEX64: return tfrt::DType::Complex64; case DataType::DT_COMPLEX128: return tfrt::DType::Complex128; case DataType::DT_STRING: return tfrt::DType::String; case DataType::DT_RESOURCE: return tfrt::DType::Resource; case DataType::DT_VARIANT: return tfrt::DType::Variant; case DataType::DT_QUINT8: return tfrt::DType::QUI8; case DataType::DT_QUINT16: return tfrt::DType::QUI16; case DataType::DT_QINT8: return tfrt::DType::QI8; case DataType::DT_QINT16: return tfrt::DType::QI16; case DataType::DT_QINT32: return tfrt::DType::QI32; default: TFRT_DLOG(ERROR) << "unsupported DataType " << static_cast<int>(data_type) << " in TFRT fallback kernel."; abort(); } } Status ParseBoolAttrValue(absl::string_view attr_value, bool* bool_val) { if (attr_value == "false") { *bool_val = false; return absl::OkStatus(); } else if (attr_value == "true") { *bool_val = true; return absl::OkStatus(); } else { return errors::InvalidArgument("Could not parse bool from \"", attr_value, "\""); } } Status ParseIntAttrValue(absl::string_view attr_value, int64_t* int_val) { bool success = absl::SimpleAtoi(attr_value, int_val); if (!success) { return errors::InvalidArgument("Could not parse int from \"", attr_value, "\""); } return absl::OkStatus(); } Status ParseTensorAttrValue(absl::string_view attr_value, tensorflow::Tensor* tensor) { if (std::is_base_of<tensorflow::protobuf::Message, tensorflow::TensorProto>()) { tensorflow::TensorProto tensor_proto; auto* message = reinterpret_cast<protobuf::Message*>(&tensor_proto); if (protobuf::TextFormat::ParseFromString( static_cast<std::string>(attr_value), message) && tensor->FromProto(tensor_proto)) { return absl::OkStatus(); } else { return errors::InvalidArgument("Could not parse tensor value from \"", attr_value, "\""); } } else { return errors::InvalidArgument( "Tensor attributes are not supported on mobile."); } } Status ParseTensorShapeAttrValue(absl::string_view attr_value, std::vector<int64_t>* shape_val) { if (attr_value.size() < 2 || attr_value[0] != '[' || attr_value[attr_value.size() - 1] != ']') { return errors::InvalidArgument( "Tensor shape attribute must be a string of the form [1,2...], instead " "got \"", attr_value, "\""); } absl::string_view attr_value_trunc = attr_value.substr(1, attr_value.size() - 2); auto container = absl::StrSplit(attr_value_trunc, ','); for (auto it = container.begin(); it != container.end(); ++it) { int64_t int_val; if (!ParseIntAttrValue(*it, &int_val).ok()) { return errors::InvalidArgument("Failed to parse an integer value from ", *it, " while parsing shape."); } shape_val->push_back(int_val); } return absl::OkStatus(); } bool IsUnusedAttribute(absl::string_view attr_name) { return absl::StrContains(attr_name, "result_segment_sizes") || absl::StrContains(attr_name, 
"operand_segment_sizes") || absl::EndsWith(attr_name, "_tf_data_function"); } llvm::Error FillAttrValueMap(const tfrt::OpAttrsRef& attrs, tfrt::HostContext* host, tensorflow::AttrValueMap* attr_value_map) { AttrValue attr_tmp; llvm::Error error = llvm::Error::success(); attrs.IterateEntries([&error, attr_value_map, &attr_tmp, host, &attrs](const OpAttrsRawEntry& entry) { assert(strcmp(entry.name, "device") != 0); if (IsUnusedAttribute(entry.name)) { return; } else if (entry.IsArray()) { error = FillAttrValueMapUsingArray(entry, attr_tmp, attrs); } else { error = FillAttrValueMapUsingScalar(entry, attr_tmp, host, attrs); } if (error) return; attr_value_map->insert(AttrValueMap::value_type(entry.name, attr_tmp)); }); return error; } namespace { tensorflow::Tensor CreateTfTensorFromDenseAttr(tfrt::DenseAttr attr) { tensorflow::TensorShape shape(absl::InlinedVector<int64_t, 4>( attr.shape().begin(), attr.shape().end())); tensorflow::DataType dtype = ConvertBefAttrTypeToTfDataType(attr.dtype()); tensorflow::Tensor tensor(dtype, shape); std::memcpy(tensor.data(), attr.GetElements(), tensor.TotalBytes()); return tensor; } Status SetUpScalarAttr(tfrt::TypedAttrBase bef_attr, tensorflow::AttrValue* tf_attr) { if (auto shape_attr = bef_attr.dyn_cast<tfrt::ShapeAttr>()) { if (shape_attr.HasRank()) { tensorflow::PartialTensorShape tf_shape(shape_attr.GetShape()); tf_shape.AsProto(tf_attr->mutable_shape()); } else { tensorflow::PartialTensorShape unranked_shape; unranked_shape.AsProto(tf_attr->mutable_shape()); } } else if (auto dense_attr = bef_attr.dyn_cast<tfrt::DenseAttr>()) { auto tf_tensor = CreateTfTensorFromDenseAttr(dense_attr); tf_tensor.AsProtoTensorContent(tf_attr->mutable_tensor()); } else if (auto type_attr = bef_attr.dyn_cast<tfrt::TypeAttr>()) { tf_attr->set_type(ConvertBefAttrTypeToTfDataType(type_attr.GetValue())); } else if (auto i1_attr = bef_attr.dyn_cast<tfrt::I1Attr>()) { tf_attr->set_b(i1_attr.GetValue()); } else if (auto f32_attr = bef_attr.dyn_cast<tfrt::F32Attr>()) { tf_attr->set_f(f32_attr.GetValue()); } else if (auto i64_attr = bef_attr.dyn_cast<tfrt::I64Attr>()) { tf_attr->set_i(i64_attr.GetValue()); } else if (auto string_attr = bef_attr.dyn_cast<tfrt::StringAttr>()) { tf_attr->set_s(string_attr.GetValue().data(), string_attr.GetValue().size()); } else { return tensorflow::errors::Internal("Failed to set up attribute."); } return absl::OkStatus(); } Status SetUpScalarFunctionAttr(tfrt::StringAttr func_attr, tensorflow::AttrValue& tf_attr) { tfrt::string_view func_name = func_attr.GetValue(); tf_attr.mutable_func()->set_name(func_name.data(), func_name.size()); return absl::OkStatus(); } void AddShapeToAttrList(tfrt::ShapeAttr shape, tensorflow::AttrValue::ListValue* list) { if (shape.HasRank()) { tensorflow::PartialTensorShape tf_shape(shape.GetShape()); tf_shape.AsProto(list->add_shape()); return; } tensorflow::PartialTensorShape unranked_shape; unranked_shape.AsProto(list->add_shape()); } void AddTensorToAttrList(tfrt::DenseAttr dense_attr, tensorflow::AttrValue::ListValue* list) { auto tf_tensor = CreateTfTensorFromDenseAttr(dense_attr); tf_tensor.AsProtoTensorContent(list->add_tensor()); } Status SetUpListAttr(tfrt::AggregateAttr aggregate_attr, tensorflow::AttrValue* tf_attr) { auto* list = tf_attr->mutable_list(); for (int i = 0; i < aggregate_attr.GetNumElements(); ++i) { auto base = aggregate_attr.GetAttribute(i); if (auto shape_attr = base.dyn_cast<tfrt::ShapeAttr>()) { AddShapeToAttrList(shape_attr, list); } else if (auto dense_attr = 
base.dyn_cast<tfrt::DenseAttr>()) { AddTensorToAttrList(dense_attr, list); } else if (auto string_attr = base.dyn_cast<tfrt::StringAttr>()) { list->add_s(string_attr.GetValue().data(), string_attr.GetValue().size()); } else { return tensorflow::errors::Internal("Failed to set up list attr."); } } return absl::OkStatus(); } Status SetUpListAttr(tfrt::ArrayAttr array_attr, tensorflow::AttrValue* tf_attr) { auto* list = tf_attr->mutable_list(); if (array_attr.GetNumElements() == 0) { return absl::OkStatus(); } tfrt::BEFAttributeType element_type = array_attr.GetElementType(); if (tfrt::IsDataTypeAttribute(element_type)) { tfrt::DType dtype = GetDataType(element_type); switch (dtype) { case tfrt::DType::I1: { for (auto value : array_attr.GetValue<bool>()) { list->add_b(value); } return absl::OkStatus(); } case tfrt::DType::I64: { for (auto value : array_attr.GetValue<int64_t>()) { list->add_i(value); } return absl::OkStatus(); } case tfrt::DType::F32: { for (auto value : array_attr.GetValue<float>()) { list->add_f(value); } return absl::OkStatus(); } default: return tensorflow::errors::Internal( StrCat("Failed to set up list attr: unsupported dtype: ", tfrt::DType(dtype))); } } else if (element_type == tfrt::BEFAttributeType::kType) { for (auto value : array_attr.GetValue<tfrt::DType>()) { list->add_type(ConvertBefAttrTypeToTfDataType(value)); } return absl::OkStatus(); } return tensorflow::errors::Internal("Failed to set up list attr."); } } Status SetUpAttrValueMap(tfrt::AggregateAttr op_attr_array, tfrt::AggregateAttr op_func_attr_array, tensorflow::AttrValueMap* attr_value_map) { auto obtain_name_attr_pair = [](tfrt::AggregateAttr attr_array, int i) -> std::pair<std::string, tfrt::TypedAttrBase> { auto pair = attr_array.GetAttributeOfType<tfrt::AggregateAttr>(i); assert(pair.GetNumElements() == 2); return {pair.GetAttributeOfType<tfrt::StringAttr>(0).GetValue().str(), pair.GetAttribute(1)}; }; for (size_t i = 0, e = op_attr_array.GetNumElements(); i != e; ++i) { auto name_attr_pair = obtain_name_attr_pair(op_attr_array, i); if (IsUnusedAttribute(name_attr_pair.first)) continue; AttrValue& tf_attr = (*attr_value_map)[name_attr_pair.first]; tfrt::TypedAttrBase attr_value = name_attr_pair.second; if (auto aggregate_attr = attr_value.dyn_cast<tfrt::AggregateAttr>()) { TF_RETURN_IF_ERROR(SetUpListAttr(aggregate_attr, &tf_attr)); } else if (auto array_attr = attr_value.dyn_cast<tfrt::ArrayAttr>()) { TF_RETURN_IF_ERROR(SetUpListAttr(array_attr, &tf_attr)); } else { TF_RETURN_IF_ERROR(SetUpScalarAttr(attr_value, &tf_attr)); } } for (size_t i = 0, e = op_func_attr_array.GetNumElements(); i != e; ++i) { auto name_attr_pair = obtain_name_attr_pair(op_func_attr_array, i); if (IsUnusedAttribute(name_attr_pair.first)) continue; AttrValue& tf_attr = (*attr_value_map)[name_attr_pair.first]; auto attr_value = name_attr_pair.second.dyn_cast<tfrt::StringAttr>(); TF_RETURN_IF_ERROR(SetUpScalarFunctionAttr(attr_value, tf_attr)); } return absl::OkStatus(); } } }
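// --- Editor's usage sketch (not part of the original file) ---
// A minimal standalone driver for the string-parsing helpers above. It
// assumes the binary links against the runtime-fallback util library; the
// TF_CHECK_OK macro comes from tensorflow's status headers.
#include <cstdint>
#include <iostream>
#include <vector>

#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"

int main() {
  bool flag = false;
  TF_CHECK_OK(tensorflow::tfd::ParseBoolAttrValue("true", &flag));
  int64_t n = 0;
  TF_CHECK_OK(tensorflow::tfd::ParseIntAttrValue("42", &n));
  std::vector<int64_t> dims;
  TF_CHECK_OK(tensorflow::tfd::ParseTensorShapeAttrValue("[1,2,3]", &dims));
  // Malformed inputs return InvalidArgument instead of crashing.
  std::cout << flag << " " << n << " " << dims.size() << "\n";  // 1 42 3
  return 0;
}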
#include "tensorflow/core/runtime_fallback/util/attr_util.h" #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" #include "tfrt/bef/bef_encoding.h" #include "tfrt/bef_converter/bef_attr_encoder.h" #include "tfrt/core_runtime/op_attr_type.h" #include "tfrt/dtype/dtype.h" #include "tfrt/host_context/attribute_utils.h" #include "tfrt/host_context/concurrent_work_queue.h" #include "tfrt/host_context/diagnostic.h" #include "tfrt/host_context/host_allocator.h" #include "tfrt/support/forward_decls.h" namespace tensorflow { namespace tfd { namespace { using ::testing::ElementsAre; using ::testing::Eq; using ::testing::EqualsProto; using ::testing::Pair; using ::testing::UnorderedElementsAre; std::unique_ptr<tfrt::HostContext> CreateTestHostContext() { return std::make_unique<tfrt::HostContext>( [](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(), tfrt::CreateSingleThreadedWorkQueue()); } struct DataTypeAndString { std::string str_val; DataType dtype; }; class ParseTfDataTypeTest : public ::testing::TestWithParam<DataTypeAndString> { }; INSTANTIATE_TEST_SUITE_P( AllDTypes, ParseTfDataTypeTest, ::testing::Values(DataTypeAndString{"DT_INT8", DataType::DT_INT8}, DataTypeAndString{"DT_INT32", DataType::DT_INT32}, DataTypeAndString{"DT_INT64", DataType::DT_INT64}, DataTypeAndString{"DT_HALF", DataType::DT_HALF}, DataTypeAndString{"DT_FLOAT", DataType::DT_FLOAT}, DataTypeAndString{"DT_DOUBLE", DataType::DT_DOUBLE})); TEST_P(ParseTfDataTypeTest, Ok) { DataType data_type; ASSERT_EQ(ParseTfDataType(GetParam().str_val, &data_type), absl::OkStatus()); EXPECT_EQ(data_type, GetParam().dtype); } TEST(ParseTfDataTypeTest, ReturnsInvalidArgument) { DataType data_type; EXPECT_EQ(ParseTfDataType("DT_BFLOAT16_REF", &data_type), errors::InvalidArgument( "Unsupported dtype, DT_BFLOAT16_REF in ParseTfDataType.")); } TEST(UtilsTest, ToAbslStringViewOk) { std::string str("Tensorflow Runtime"); tfrt::string_view str_view(str); EXPECT_EQ(ToAbslStringView(str_view), str); } struct OpAttrTypeAndDType { tfrt::OpAttrType op_attr_type; DataType dtype; }; class OpAttrTypeDTypeTest : public ::testing::TestWithParam<OpAttrTypeAndDType> {}; INSTANTIATE_TEST_SUITE_P( AllDTypes, OpAttrTypeDTypeTest, ::testing::Values( OpAttrTypeAndDType{tfrt::OpAttrType::BOOL, DataType::DT_BOOL}, OpAttrTypeAndDType{tfrt::OpAttrType::UI8, DataType::DT_UINT8}, OpAttrTypeAndDType{tfrt::OpAttrType::I8, DataType::DT_INT8}, OpAttrTypeAndDType{tfrt::OpAttrType::I16, DataType::DT_INT16}, OpAttrTypeAndDType{tfrt::OpAttrType::UI16, DataType::DT_UINT16}, OpAttrTypeAndDType{tfrt::OpAttrType::I32, DataType::DT_INT32}, OpAttrTypeAndDType{tfrt::OpAttrType::UI32, DataType::DT_UINT32}, OpAttrTypeAndDType{tfrt::OpAttrType::I64, DataType::DT_INT64}, OpAttrTypeAndDType{tfrt::OpAttrType::UI64, DataType::DT_UINT64}, OpAttrTypeAndDType{tfrt::OpAttrType::BF16, DataType::DT_BFLOAT16}, OpAttrTypeAndDType{tfrt::OpAttrType::F16, DataType::DT_HALF}, OpAttrTypeAndDType{tfrt::OpAttrType::F32, DataType::DT_FLOAT}, OpAttrTypeAndDType{tfrt::OpAttrType::F64, DataType::DT_DOUBLE}, OpAttrTypeAndDType{tfrt::OpAttrType::COMPLEX64, DataType::DT_COMPLEX64}, OpAttrTypeAndDType{tfrt::OpAttrType::COMPLEX128, 
DataType::DT_COMPLEX128}, OpAttrTypeAndDType{tfrt::OpAttrType::CHAR, DataType::DT_STRING}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QUI8, DataType::DT_QUINT8}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QUI16, DataType::DT_QUINT16}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI8, DataType::DT_QINT8}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI16, DataType::DT_QINT16}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI32, DataType::DT_QINT32}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_RESOURCE, DataType::DT_RESOURCE}, OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_VARIANT, DataType::DT_VARIANT})); TEST_P(OpAttrTypeDTypeTest, ToTfDataTypeOk) { EXPECT_EQ(ConvertToTfDataType(GetParam().op_attr_type), GetParam().dtype); } TEST_P(OpAttrTypeDTypeTest, FromTfDataTypeOk) { EXPECT_EQ(ConvertFromTfDataType(GetParam().dtype), GetParam().op_attr_type); } TEST(OpAttrTypeDTypeTest, DeathUnsupportedDType) { EXPECT_DEATH(ConvertFromTfDataType(DataType::DT_RESOURCE_REF), ""); } struct TfrtDTypeAndTensorflowDType { tfrt::DType tfrt_dtype; DataType dtype; }; class TfrtToTensorflowDTypeTest : public ::testing::TestWithParam<TfrtDTypeAndTensorflowDType> {}; INSTANTIATE_TEST_SUITE_P( AllDTypes, TfrtToTensorflowDTypeTest, ::testing::Values( TfrtDTypeAndTensorflowDType{tfrt::DType::I1, DataType::DT_BOOL}, TfrtDTypeAndTensorflowDType{tfrt::DType::UI8, DataType::DT_UINT8}, TfrtDTypeAndTensorflowDType{tfrt::DType::I8, DataType::DT_INT8}, TfrtDTypeAndTensorflowDType{tfrt::DType::I16, DataType::DT_INT16}, TfrtDTypeAndTensorflowDType{tfrt::DType::UI16, DataType::DT_UINT16}, TfrtDTypeAndTensorflowDType{tfrt::DType::I32, DataType::DT_INT32}, TfrtDTypeAndTensorflowDType{tfrt::DType::UI32, DataType::DT_UINT32}, TfrtDTypeAndTensorflowDType{tfrt::DType::I64, DataType::DT_INT64}, TfrtDTypeAndTensorflowDType{tfrt::DType::UI64, DataType::DT_UINT64}, TfrtDTypeAndTensorflowDType{tfrt::DType::BF16, DataType::DT_BFLOAT16}, TfrtDTypeAndTensorflowDType{tfrt::DType::F16, DataType::DT_HALF}, TfrtDTypeAndTensorflowDType{tfrt::DType::F32, DataType::DT_FLOAT}, TfrtDTypeAndTensorflowDType{tfrt::DType::F64, DataType::DT_DOUBLE}, TfrtDTypeAndTensorflowDType{tfrt::DType::Complex64, DataType::DT_COMPLEX64}, TfrtDTypeAndTensorflowDType{tfrt::DType::Complex128, DataType::DT_COMPLEX128}, TfrtDTypeAndTensorflowDType{tfrt::DType::String, DataType::DT_STRING}, TfrtDTypeAndTensorflowDType{tfrt::DType::QUI8, DataType::DT_QUINT8}, TfrtDTypeAndTensorflowDType{tfrt::DType::QUI16, DataType::DT_QUINT16}, TfrtDTypeAndTensorflowDType{tfrt::DType::QI8, DataType::DT_QINT8}, TfrtDTypeAndTensorflowDType{tfrt::DType::QI16, DataType::DT_QINT16}, TfrtDTypeAndTensorflowDType{tfrt::DType::QI32, DataType::DT_QINT32}, TfrtDTypeAndTensorflowDType{tfrt::DType::Resource, DataType::DT_RESOURCE}, TfrtDTypeAndTensorflowDType{tfrt::DType::Variant, DataType::DT_VARIANT})); TEST_P(TfrtToTensorflowDTypeTest, BefAttrTypeToTfDataTypeOk) { EXPECT_EQ(ConvertBefAttrTypeToTfDataType(GetParam().tfrt_dtype), GetParam().dtype); } TEST_P(TfrtToTensorflowDTypeTest, TfDataTypeTpBefAttrTypeOk) { EXPECT_EQ(ConvertTfDataTypeToBefAttrType(GetParam().dtype), GetParam().tfrt_dtype); } TEST(TfrtToTensorflowDTypeTest, DeathUnsupportedDType) { EXPECT_DEATH(ConvertTfDataTypeToBefAttrType(DataType::DT_RESOURCE_REF), ""); } TEST(UtilsTest, ParseTensorAttrValueOk) { tensorflow::Tensor tensor; std::string tensor_str = R"pb(dtype: DT_INT32 tensor_shape { dim { size: 2 } dim { size: 2 } } int_val: 1 int_val: 1 int_val: 1 int_val: 1)pb"; 
ASSERT_EQ(ParseTensorAttrValue(tensor_str, &tensor), absl::OkStatus()); EXPECT_EQ(tensor.dtype(), DT_INT32); EXPECT_EQ(tensor.NumElements(), 4); } TEST(UtilsTest, ParseTensorAttrValueReturnsInvalidArgument) { tensorflow::Tensor tensor; std::string tensor_str = R"pb(foobar)pb"; EXPECT_EQ( ParseTensorAttrValue(tensor_str, &tensor), errors::InvalidArgument("Could not parse tensor value from \"foobar\"")); } TEST(UtilsTest, ParseTensorShapeAttrValueOk) { std::vector<int64_t> dims; ASSERT_THAT(ParseTensorShapeAttrValue("[1,2,3]", &dims), absl::OkStatus()); EXPECT_THAT(dims, ElementsAre(Eq(1), Eq(2), Eq(3))); } TEST(UtilsTest, ParseTensorShapeAttrValueInvalidArgument) { std::vector<int64_t> dims; EXPECT_EQ( ParseTensorShapeAttrValue("foobar", &dims), errors::InvalidArgument("Tensor shape attribute must be a string of the " "form [1,2...], instead got \"foobar\"")); } TEST(UtilsTest, ParseTensorShapeAttrValueInvalidArgumentEmptyString) { std::vector<int64_t> dims; EXPECT_EQ(ParseTensorShapeAttrValue("", &dims), errors::InvalidArgument("Tensor shape attribute must be a string " "of the form [1,2...], instead got \"\"")); } TEST(UtilsTest, ParseBoolAttrValueOk) { bool bool_val; ASSERT_THAT(ParseBoolAttrValue("false", &bool_val), absl::OkStatus()); EXPECT_FALSE(bool_val); ASSERT_THAT(ParseBoolAttrValue("true", &bool_val), absl::OkStatus()); EXPECT_TRUE(bool_val); } TEST(UtilsTest, ParseBoolAttrValueInvalidArgument) { bool bool_val; EXPECT_EQ(ParseBoolAttrValue("foobar", &bool_val), errors::InvalidArgument("Could not parse bool from \"foobar\"")); } TEST(UtilsTest, ParseIntAttrValueOk) { int64_t int_val; ASSERT_THAT(ParseIntAttrValue("42", &int_val), absl::OkStatus()); EXPECT_EQ(int_val, 42); } TEST(UtilsTest, ParseIntAttrValueInvalidArgument) { int64_t int_val; EXPECT_EQ(ParseIntAttrValue("foobar", &int_val), errors::InvalidArgument("Could not parse int from \"foobar\"")); } TEST(UtilsTest, IsUnusedAttributeOk) { EXPECT_TRUE(IsUnusedAttribute("result_segment_sizes")); EXPECT_TRUE(IsUnusedAttribute("operand_segment_sizes")); EXPECT_TRUE(IsUnusedAttribute("_tf_data_function")); EXPECT_FALSE(IsUnusedAttribute("device")); } TEST(UtilsTest, FillAttrValueMapOk) { tfrt::OpAttrs attrs; attrs.SetArray("shape", tfrt::ArrayRef<int64_t>{2, 2}); attrs.SetArray("values", tfrt::ArrayRef<float>{2}); attrs.SetArray("flags", tfrt::ArrayRef<bool>{false, true}); attrs.SetArray("baz", tfrt::ArrayRef<char>{'a'}); attrs.Set<bool>("transpose_a", false); attrs.Set<bool>("transpose_b", true); attrs.Set<int64_t>("result_segment_sizes", 2); attrs.Set<float>("foo", 2); attrs.Set<int64_t>("bar", 2); tfrt::AggregateAttr aggAttr; attrs.Set<tfrt::AggregateAttr>("aggAttr", aggAttr); AttrValueMap map; auto host_context = CreateTestHostContext(); ASSERT_FALSE(llvm::errorToBool( FillAttrValueMap(attrs.freeze(), host_context.get(), &map))); EXPECT_THAT( map, UnorderedElementsAre( Pair(Eq("shape"), EqualsProto(R"pb(list { i: 2 i: 2 })pb")), Pair(Eq("values"), EqualsProto(R"pb(list { f: 2 })pb")), Pair(Eq("flags"), EqualsProto(R"pb(list { b: false b: true })pb")), Pair(Eq("baz"), EqualsProto(R"pb(s: "a")pb")), Pair(Eq("transpose_a"), EqualsProto(R"pb(b: false)pb")), Pair(Eq("transpose_b"), EqualsProto(R"pb(b: true)pb")), Pair(Eq("foo"), EqualsProto(R"pb(f: 2)pb")), Pair(Eq("bar"), EqualsProto(R"pb(i: 2)pb")), Pair(Eq("aggAttr"), EqualsProto(R"pb(list {})pb")))); } } } }
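// --- Editor's sketch (hypothetical, not part of the original tests) ---
// Once FillAttrValueMap has produced an AttrValueMap, its entries can be
// copied onto a NodeDef with AddNodeAttr from node_def_util.h (already
// included by the test above). MakeNodeDef and the "MatMul" op name are
// illustrative only; AttrValueMap is assumed to come from attr_value_util.h.
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"

tensorflow::NodeDef MakeNodeDef(const tensorflow::AttrValueMap& map) {
  tensorflow::NodeDef node;
  node.set_op("MatMul");  // hypothetical op, for illustration
  for (const auto& entry : map) {
    tensorflow::AddNodeAttr(entry.first, entry.second, &node);
  }
  return node;
}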
llvm::Error FillAttrValueMap(const tfrt::OpAttrsRef& attrs,
                             tfrt::HostContext* host,
                             tensorflow::AttrValueMap* attr_value_map) {
  AttrValue attr_tmp;
  llvm::Error error = llvm::Error::success();
  attrs.IterateEntries([&error, attr_value_map, &attr_tmp, host,
                        &attrs](const OpAttrsRawEntry& entry) {
    // "device" is expected to have been handled before this conversion runs.
    assert(strcmp(entry.name, "device") != 0);
    if (IsUnusedAttribute(entry.name)) {
      // Segment-size bookkeeping attributes and tf.data function markers
      // (see IsUnusedAttribute) are dropped, not converted.
      return;
    } else if (entry.IsArray()) {
      error = FillAttrValueMapUsingArray(entry, attr_tmp, attrs);
    } else {
      error = FillAttrValueMapUsingScalar(entry, attr_tmp, host, attrs);
    }
    if (error) return;  // An entry that failed to convert is never inserted.
    attr_value_map->insert(AttrValueMap::value_type(entry.name, attr_tmp));
  });
  return error;
}
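// --- Editor's note (sketch, not from the original) ---
// Minimal call shape, assuming a live HostContext such as the test helper
// CreateTestHostContext below constructs:
//
//   tensorflow::AttrValueMap map;
//   llvm::Error err = FillAttrValueMap(attrs.freeze(), host.get(), &map);
//   if (llvm::errorToBool(std::move(err))) { /* conversion failed */ }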
TEST(UtilsTest, FillAttrValueMapOk) {
  tfrt::OpAttrs attrs;
  attrs.SetArray("shape", tfrt::ArrayRef<int64_t>{2, 2});
  attrs.SetArray("values", tfrt::ArrayRef<float>{2});
  attrs.SetArray("flags", tfrt::ArrayRef<bool>{false, true});
  attrs.SetArray("baz", tfrt::ArrayRef<char>{'a'});
  attrs.Set<bool>("transpose_a", false);
  attrs.Set<bool>("transpose_b", true);
  attrs.Set<int64_t>("result_segment_sizes", 2);
  attrs.Set<float>("foo", 2);
  attrs.Set<int64_t>("bar", 2);
  tfrt::AggregateAttr aggAttr;
  attrs.Set<tfrt::AggregateAttr>("aggAttr", aggAttr);

  AttrValueMap map;
  auto host_context = CreateTestHostContext();
  ASSERT_FALSE(llvm::errorToBool(
      FillAttrValueMap(attrs.freeze(), host_context.get(), &map)));
  EXPECT_THAT(
      map,
      UnorderedElementsAre(
          Pair(Eq("shape"), EqualsProto(R"pb(list { i: 2 i: 2 })pb")),
          Pair(Eq("values"), EqualsProto(R"pb(list { f: 2 })pb")),
          Pair(Eq("flags"), EqualsProto(R"pb(list { b: false b: true })pb")),
          Pair(Eq("baz"), EqualsProto(R"pb(s: "a")pb")),
          Pair(Eq("transpose_a"), EqualsProto(R"pb(b: false)pb")),
          Pair(Eq("transpose_b"), EqualsProto(R"pb(b: true)pb")),
          Pair(Eq("foo"), EqualsProto(R"pb(f: 2)pb")),
          Pair(Eq("bar"), EqualsProto(R"pb(i: 2)pb")),
          Pair(Eq("aggAttr"), EqualsProto(R"pb(list {})pb"))));
}
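// --- Editor's aside (illustrative) ---
// The R"pb(...)pb" literals above are protobuf text format, the same syntax
// protobuf::TextFormat parses elsewhere in this codebase; e.g. the expected
// value for "transpose_b" could equally be built directly:
//
//   tensorflow::AttrValue expected;
//   CHECK(tensorflow::protobuf::TextFormat::ParseFromString("b: true",
//                                                           &expected));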
#include "tsl/lib/histogram/histogram.h" #include <float.h> #include <math.h> #include <vector> #include "tsl/platform/logging.h" #include "tsl/platform/mutex.h" #include "tsl/platform/types.h" #include "tsl/protobuf/histogram.pb.h" namespace tsl { namespace histogram { static std::vector<double>* InitDefaultBucketsInner() { std::vector<double> buckets; std::vector<double> neg_buckets; double v = 1.0e-12; while (v < 1.0e20) { buckets.push_back(v); neg_buckets.push_back(-v); v *= 1.1; } buckets.push_back(DBL_MAX); neg_buckets.push_back(-DBL_MAX); std::reverse(neg_buckets.begin(), neg_buckets.end()); std::vector<double>* result = new std::vector<double>; result->insert(result->end(), neg_buckets.begin(), neg_buckets.end()); result->push_back(0.0); result->insert(result->end(), buckets.begin(), buckets.end()); return result; } static absl::Span<const double> InitDefaultBuckets() { static std::vector<double>* default_bucket_limits = InitDefaultBucketsInner(); return *default_bucket_limits; } Histogram::Histogram() : bucket_limits_(InitDefaultBuckets()) { Clear(); } Histogram::Histogram(absl::Span<const double> custom_bucket_limits) : custom_bucket_limits_(custom_bucket_limits.begin(), custom_bucket_limits.end()), bucket_limits_(custom_bucket_limits_) { #ifndef NDEBUG DCHECK_GT(bucket_limits_.size(), size_t{0}); for (size_t i = 1; i < bucket_limits_.size(); i++) { DCHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]); } #endif Clear(); } bool Histogram::DecodeFromProto(const HistogramProto& proto) { if ((proto.bucket_size() != proto.bucket_limit_size()) || (proto.bucket_size() == 0)) { return false; } min_ = proto.min(); max_ = proto.max(); num_ = proto.num(); sum_ = proto.sum(); sum_squares_ = proto.sum_squares(); custom_bucket_limits_.clear(); custom_bucket_limits_.insert(custom_bucket_limits_.end(), proto.bucket_limit().begin(), proto.bucket_limit().end()); bucket_limits_ = custom_bucket_limits_; buckets_.clear(); buckets_.insert(buckets_.end(), proto.bucket().begin(), proto.bucket().end()); return true; } void Histogram::Clear() { min_ = bucket_limits_[bucket_limits_.size() - 1]; max_ = -DBL_MAX; num_ = 0; sum_ = 0; sum_squares_ = 0; buckets_.resize(bucket_limits_.size()); for (size_t i = 0; i < bucket_limits_.size(); i++) { buckets_[i] = 0; } } void Histogram::Add(double value) { int b = std::upper_bound(bucket_limits_.begin(), bucket_limits_.end(), value) - bucket_limits_.begin(); buckets_[b] += 1.0; if (min_ > value) min_ = value; if (max_ < value) max_ = value; num_++; sum_ += value; sum_squares_ += (value * value); } double Histogram::Median() const { return Percentile(50.0); } double Histogram::Remap(double x, double x0, double x1, double y0, double y1) const { return y0 + (x - x0) / (x1 - x0) * (y1 - y0); } double Histogram::Percentile(double p) const { if (num_ == 0.0) return 0.0; double threshold = num_ * (p / 100.0); double cumsum_prev = 0; for (size_t i = 0; i < buckets_.size(); i++) { double cumsum = cumsum_prev + buckets_[i]; if (cumsum >= threshold) { if (cumsum == cumsum_prev) { continue; } double lhs = (i == 0 || cumsum_prev == 0) ? 
min_ : bucket_limits_[i - 1]; lhs = std::max(lhs, min_); double rhs = bucket_limits_[i]; rhs = std::min(rhs, max_); double weight = Remap(threshold, cumsum_prev, cumsum, lhs, rhs); return weight; } cumsum_prev = cumsum; } return max_; } double Histogram::Average() const { if (num_ == 0.0) return 0; return sum_ / num_; } double Histogram::StandardDeviation() const { if (num_ == 0.0) return 0; double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_); return sqrt(variance); } std::string Histogram::ToString() const { std::string r; char buf[200]; snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, Average(), StandardDeviation()); r.append(buf); snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", (num_ == 0.0 ? 0.0 : min_), Median(), max_); r.append(buf); r.append("------------------------------------------------------\n"); const double mult = num_ > 0 ? 100.0 / num_ : 0.0; double sum = 0; for (size_t b = 0; b < buckets_.size(); b++) { if (buckets_[b] <= 0.0) continue; sum += buckets_[b]; snprintf(buf, sizeof(buf), "[ %10.2g, %10.2g ) %7.0f %7.3f%% %7.3f%% ", ((b == 0) ? -DBL_MAX : bucket_limits_[b - 1]), bucket_limits_[b], buckets_[b], mult * buckets_[b], mult * sum); r.append(buf); int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5); r.append(marks, '#'); r.push_back('\n'); } return r; } void Histogram::EncodeToProto(HistogramProto* proto, bool preserve_zero_buckets) const { proto->Clear(); proto->set_min(min_); proto->set_max(max_); proto->set_num(num_); proto->set_sum(sum_); proto->set_sum_squares(sum_squares_); for (size_t i = 0; i < buckets_.size();) { double end = bucket_limits_[i]; double count = buckets_[i]; i++; if (!preserve_zero_buckets && count <= 0.0) { while (i < buckets_.size() && buckets_[i] <= 0.0) { end = bucket_limits_[i]; count = buckets_[i]; i++; } } proto->add_bucket_limit(end); proto->add_bucket(count); } if (proto->bucket_size() == 0.0) { proto->add_bucket_limit(DBL_MAX); proto->add_bucket(0.0); } } bool ThreadSafeHistogram::DecodeFromProto(const HistogramProto& proto) { mutex_lock l(mu_); return histogram_.DecodeFromProto(proto); } void ThreadSafeHistogram::Clear() { mutex_lock l(mu_); histogram_.Clear(); } void ThreadSafeHistogram::Add(double value) { mutex_lock l(mu_); histogram_.Add(value); } void ThreadSafeHistogram::EncodeToProto(HistogramProto* proto, bool preserve_zero_buckets) const { mutex_lock l(mu_); histogram_.EncodeToProto(proto, preserve_zero_buckets); } double ThreadSafeHistogram::Median() const { mutex_lock l(mu_); return histogram_.Median(); } double ThreadSafeHistogram::Percentile(double p) const { mutex_lock l(mu_); return histogram_.Percentile(p); } double ThreadSafeHistogram::Average() const { mutex_lock l(mu_); return histogram_.Average(); } double ThreadSafeHistogram::StandardDeviation() const { mutex_lock l(mu_); return histogram_.StandardDeviation(); } std::string ThreadSafeHistogram::ToString() const { mutex_lock l(mu_); return histogram_.ToString(); } } }
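// --- Editor's usage sketch (not part of the original file) ---
// Standalone driver for the histogram above, assuming the tsl histogram
// library is linked; DBL_MAX needs <cfloat>.
#include <cfloat>
#include <iostream>

#include "tsl/lib/histogram/histogram.h"

int main() {
  tsl::histogram::Histogram h({0, 10, 100, DBL_MAX});  // custom bucket limits
  for (int i = 1; i <= 100; ++i) h.Add(i);
  std::cout << h.Median() << "\n";         // Percentile(50), linearly remapped
  std::cout << h.Percentile(90.0) << "\n";
  std::cout << h.ToString();               // ASCII rendering with '#' bars
  return 0;
}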
#include "tsl/lib/histogram/histogram.h" #include <float.h> #include "tsl/platform/logging.h" #include "tsl/platform/test.h" #include "tsl/protobuf/histogram.pb.h" namespace tsl { namespace histogram { static void Validate(const Histogram& h) { string s1 = h.ToString(); LOG(ERROR) << s1; HistogramProto proto_with_zeroes; h.EncodeToProto(&proto_with_zeroes, true); Histogram h2; EXPECT_TRUE(h2.DecodeFromProto(proto_with_zeroes)); string s2 = h2.ToString(); LOG(ERROR) << s2; EXPECT_EQ(s1, s2); HistogramProto proto_no_zeroes; h.EncodeToProto(&proto_no_zeroes, false); LOG(ERROR) << proto_no_zeroes.DebugString(); Histogram h3; EXPECT_TRUE(h3.DecodeFromProto(proto_no_zeroes)); string s3 = h3.ToString(); LOG(ERROR) << s3; EXPECT_EQ(s1, s3); } TEST(Histogram, Empty) { Histogram h; Validate(h); } TEST(Histogram, SingleValue) { Histogram h; h.Add(-3.0); Validate(h); } TEST(Histogram, CustomBuckets) { Histogram h({-10, -5, 0, 5, 10, 100, 1000, 10000, DBL_MAX}); h.Add(-3.0); h.Add(4.99); h.Add(5.0); h.Add(1000.0); Validate(h); } TEST(Histogram, Median) { Histogram h({0, 10, 100, DBL_MAX}); h.Add(-2); h.Add(-2); h.Add(0); double median = h.Median(); EXPECT_EQ(median, -0.5); } TEST(Histogram, Percentile) { Histogram h({1, 2, 3, 4}); h.Add(-1.0); h.Add(1.5); h.Add(1.5); h.Add(1.5); h.Add(2.5); h.Add(2.5); h.Add(2.5); h.Add(2.5); h.Add(3.5); h.Add(3.9); EXPECT_EQ(h.Percentile(0), -1.0); EXPECT_EQ(h.Percentile(25), 1.5); EXPECT_EQ(h.Percentile(50), 2.25); EXPECT_EQ(h.Percentile(75), 2.875); EXPECT_EQ(h.Percentile(90), 3.45); EXPECT_EQ(h.Percentile(100), 3.9); } TEST(Histogram, Basic) { Histogram h; for (int i = 0; i < 100; i++) { h.Add(i); } for (int i = 1000; i < 100000; i += 1000) { h.Add(i); } Validate(h); } TEST(ThreadSafeHistogram, Basic) { Histogram h; for (int i = 0; i < 100; i++) { h.Add(i); } ThreadSafeHistogram tsh; for (int i = 0; i < 100; i++) { tsh.Add(i); } for (int i = 0; i < 2; ++i) { bool preserve_zero_buckets = (i == 0); HistogramProto h_proto; h.EncodeToProto(&h_proto, preserve_zero_buckets); HistogramProto tsh_proto; tsh.EncodeToProto(&tsh_proto, preserve_zero_buckets); Histogram h2; EXPECT_TRUE(h2.DecodeFromProto(tsh_proto)); ThreadSafeHistogram tsh2; EXPECT_TRUE(tsh2.DecodeFromProto(h_proto)); EXPECT_EQ(h2.ToString(), tsh2.ToString()); } EXPECT_EQ(h.Median(), tsh.Median()); EXPECT_EQ(h.Percentile(40.0), tsh.Percentile(40.0)); EXPECT_EQ(h.Average(), tsh.Average()); EXPECT_EQ(h.StandardDeviation(), tsh.StandardDeviation()); EXPECT_EQ(h.ToString(), tsh.ToString()); } } }
void Histogram::Clear() {
  // Start min_ at the highest bucket limit and max_ at -DBL_MAX so that the
  // first Add() pulls both onto the observed value.
  min_ = bucket_limits_[bucket_limits_.size() - 1];
  max_ = -DBL_MAX;
  num_ = 0;
  sum_ = 0;
  sum_squares_ = 0;
  buckets_.resize(bucket_limits_.size());
  for (size_t i = 0; i < bucket_limits_.size(); i++) {
    buckets_[i] = 0;
  }
}
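// --- Editor's behavior sketch (derived from Clear() and Add() above) ---
// After Clear(), min_ holds the top bucket limit (DBL_MAX for the default
// buckets) and max_ holds -DBL_MAX, so the very first Add(v) sets both:
//
//   tsl::histogram::Histogram h;
//   h.Add(-3.0);  // now min_ == max_ == -3.0 and num_ == 1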
TEST(Histogram, Empty) {
  Histogram h;
  Validate(h);
}

TEST(Histogram, SingleValue) {
  Histogram h;
  h.Add(-3.0);
  Validate(h);
}

TEST(Histogram, CustomBuckets) {
  Histogram h({-10, -5, 0, 5, 10, 100, 1000, 10000, DBL_MAX});
  h.Add(-3.0);
  h.Add(4.99);
  h.Add(5.0);
  h.Add(1000.0);
  Validate(h);
}

TEST(Histogram, Basic) {
  Histogram h;
  for (int i = 0; i < 100; i++) {
    h.Add(i);
  }
  for (int i = 1000; i < 100000; i += 1000) {
    h.Add(i);
  }
  Validate(h);
}
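// --- Editor's hypothetical companion test (not in the original suite) ---
// Constant inputs make the variance term vanish exactly:
// sum_squares_ * num_ - sum_ * sum_ = 250 * 10 - 50 * 50 = 0.
TEST(Histogram, ConstantInputStdDev) {
  Histogram h;
  for (int i = 0; i < 10; i++) h.Add(5.0);
  EXPECT_EQ(h.Average(), 5.0);
  EXPECT_EQ(h.StandardDeviation(), 0.0);
}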
#include "xla/reference_util.h" #include <array> #include <cmath> #include <functional> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/functional/function_ref.h" #include "xla/client/xla_builder.h" #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/literal_util.h" #include "xla/service/shape_inference.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/lib/math/math_util.h" #include "tsl/platform/logging.h" namespace xla { std::unique_ptr<Array2D<double>> ReferenceUtil::Array2DF32ToF64( const Array2D<float>& input) { auto result = std::make_unique<Array2D<double>>(input.height(), input.width()); for (int64_t rowno = 0; rowno < input.height(); ++rowno) { for (int64_t colno = 0; colno < input.width(); ++colno) { (*result)(rowno, colno) = input(rowno, colno); } } return result; } std::unique_ptr<Array3D<float>> ReferenceUtil::ConvArray3D( const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride, Padding padding) { return ConvArray3DGeneralDimensionsDilated( lhs, rhs, kernel_stride, padding, 1, 1, XlaBuilder::CreateDefaultConvDimensionNumbers(1)); } std::unique_ptr<Array3D<float>> ReferenceUtil::ConvArray3DGeneralDimensionsDilated( const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride, Padding padding, int64_t lhs_dilation, int64_t rhs_dilation, const ConvolutionDimensionNumbers& dnums) { CHECK_EQ(dnums.input_spatial_dimensions_size(), 1); CHECK_EQ(dnums.kernel_spatial_dimensions_size(), 1); CHECK_EQ(dnums.output_spatial_dimensions_size(), 1); Array4D<float> a4dlhs(lhs.n1(), lhs.n2(), lhs.n3(), 1); a4dlhs.Each([&](absl::Span<const int64_t> indices, float* value_ptr) { CHECK_EQ(indices[3], 0); *value_ptr = lhs.operator()(indices[0], indices[1], indices[2]); }); Array4D<float> a4drhs(rhs.n1(), rhs.n2(), rhs.n3(), 1); a4drhs.Each([&](absl::Span<const int64_t> indices, float* value_ptr) { CHECK_EQ(indices[3], 0); *value_ptr = rhs.operator()(indices[0], indices[1], indices[2]); }); ConvolutionDimensionNumbers dnums2d = dnums; dnums2d.add_input_spatial_dimensions(3); dnums2d.add_kernel_spatial_dimensions(3); dnums2d.add_output_spatial_dimensions(3); std::unique_ptr<Array4D<float>> convr4 = ConvArray4DGeneralDimensionsDilated( a4dlhs, a4drhs, {kernel_stride, 1}, padding, {lhs_dilation, 1}, {rhs_dilation, 1}, dnums2d); auto convr3 = std::make_unique<Array3D<float>>( convr4->planes(), convr4->depth(), convr4->height()); convr4->Each([&](absl::Span<const int64_t> indices, float* value_ptr) { CHECK_EQ(indices[3], 0); convr3->operator()(indices[0], indices[1], indices[2]) = *value_ptr; }); return convr3; } std::unique_ptr<Array4D<float>> ReferenceUtil::ConvArray4D( const Array4D<float>& lhs, const Array4D<float>& rhs, std::pair<int64_t, int64_t> kernel_stride, Padding padding) { return ConvArray4DGeneralDimensions( lhs, rhs, kernel_stride, padding, XlaBuilder::CreateDefaultConvDimensionNumbers()); } std::unique_ptr<Array4D<float>> ReferenceUtil::SeparableConvArray4D(const Array4D<float>& input, const Array4D<float>& depthwise_weights, const Array4D<float>& pointwise_weights, std::pair<int64_t, int64_t> kernel_stride, Padding padding) { const int64_t depth_multiplier = depthwise_weights.planes(); CHECK_EQ(pointwise_weights.depth(), input.depth() * depth_multiplier); Array4D<float> weights(pointwise_weights.planes(), input.depth(), depthwise_weights.height(), depthwise_weights.width()); for (int64_t kx = 0; kx < 
depthwise_weights.width(); ++kx) { for (int64_t ky = 0; ky < depthwise_weights.height(); ++ky) { for (int64_t kz = 0; kz < input.depth(); ++kz) { for (int64_t out = 0; out < pointwise_weights.planes(); ++out) { float weight = 0.0; for (int64_t depth = 0; depth < depth_multiplier; ++depth) { weight += depthwise_weights(depth, kz, ky, kx) * pointwise_weights(out, depth + kz * depth_multiplier, 0, 0); } weights(out, kz, ky, kx) = weight; } } } } return ConvArray4D(input, weights, kernel_stride, padding); } int64_t ReferenceUtil::WindowCount(int64_t unpadded_width, int64_t window_len, int64_t stride, Padding padding) { if (padding == Padding::kValid) { return window_util::StridedBound(unpadded_width, window_len, stride); } return tsl::MathUtil::CeilOfRatio(unpadded_width, stride); } std::unique_ptr<std::vector<float>> ReferenceUtil::ReduceWindow1DGeneric( absl::Span<const float> operand, float init, absl::FunctionRef<float(float, float)> reduce_func, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, absl::Span<const std::pair<int64_t, int64_t>> padding) { CHECK_EQ(window.size(), 1); CHECK_EQ(stride.size(), 1); CHECK_EQ(padding.size(), 1); int64_t padded_width = padding[0].first + operand.size() + padding[0].second; int64_t stride_amount = stride[0]; int64_t window_size = window[0]; int64_t result_size = window_util::StridedBound(padded_width, window_size, stride_amount); int64_t pad_low = padding[0].first; auto result = std::make_unique<std::vector<float>>(result_size); for (int64_t i0 = 0; i0 < result_size; ++i0) { int64_t i0_base = i0 * stride_amount - pad_low; float val = init; for (int64_t i0_win = 0; i0_win < window_size; ++i0_win) { if (i0_base + i0_win >= 0 && i0_base + i0_win < operand.size()) { val = reduce_func(val, operand[i0_base + i0_win]); } } (*result)[i0] = val; } return result; } std::unique_ptr<std::vector<float>> ReferenceUtil::ReduceWindow1DAdd(absl::Span<const float> operand, float init, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, Padding padding) { const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; }; std::vector<int64_t> dim_lengths{static_cast<int64_t>(operand.size())}; return ReduceWindow1DGeneric( operand, init, add_reduce, window, stride, xla::MakePadding(dim_lengths, window, stride, padding)); } std::unique_ptr<Array3D<float>> ReferenceUtil::ReduceWindow3DAdd( const Array3D<float>& operand, float init, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, Padding padding) { std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3()}; auto padding_both = xla::MakePadding(dim_lengths, window, stride, padding); std::vector<int64_t> window_counts(window.size(), 0); std::vector<int64_t> pad_low(window.size(), 0); for (int64_t i = 0; i < window.size(); ++i) { window_counts[i] = WindowCount(dim_lengths[i], window[i], stride[i], padding); pad_low[i] = padding_both[i].first; } auto result = std::make_unique<Array3D<float>>( window_counts[0], window_counts[1], window_counts[2]); for (int64_t i0 = 0; i0 < window_counts[0]; ++i0) { for (int64_t i1 = 0; i1 < window_counts[1]; ++i1) { for (int64_t i2 = 0; i2 < window_counts[2]; ++i2) { int64_t i0_base = i0 * stride[0] - pad_low[0]; int64_t i1_base = i1 * stride[1] - pad_low[1]; int64_t i2_base = i2 * stride[2] - pad_low[2]; float val = init; for (int64_t i0_win = 0; i0_win < window[0]; ++i0_win) { for (int64_t i1_win = 0; i1_win < window[1]; ++i1_win) { for (int64_t i2_win = 0; i2_win < window[2]; ++i2_win) { if (i0_base + i0_win >= 0 && 
i1_base + i1_win >= 0 && i2_base + i2_win >= 0 && i0_base + i0_win < operand.n1() && i1_base + i1_win < operand.n2() && i2_base + i2_win < operand.n3()) { val += operand(i0_base + i0_win, i1_base + i1_win, i2_base + i2_win); } } } } (*result)(i0, i1, i2) = val; } } } return result; } std::unique_ptr<Array4D<float>> ReferenceUtil::ReduceWindow4DGeneric( const Array4D<float>& operand, float init, absl::FunctionRef<float(float, float)> reduce_func, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, Padding padding) { std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(), operand.n4()}; return ReduceWindow4DGeneric( operand, init, reduce_func, window, stride, xla::MakePadding(dim_lengths, window, stride, padding)); } std::unique_ptr<Array4D<float>> ReferenceUtil::ReduceWindow4DGeneric( const Array4D<float>& operand, float init, absl::FunctionRef<float(float, float)> reduce_func, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, absl::Span<const std::pair<int64_t, int64_t>> padding) { std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(), operand.n4()}; std::vector<int64_t> window_counts(window.size(), 0); std::vector<int64_t> pad_low(window.size(), 0); for (int64_t i = 0; i < window.size(); ++i) { int64_t padded_width = padding[i].first + dim_lengths[i] + padding[i].second; window_counts[i] = window_util::StridedBound(padded_width, window[i], stride[i]); pad_low[i] = padding[i].first; } auto result = std::make_unique<Array4D<float>>( window_counts[0], window_counts[1], window_counts[2], window_counts[3]); for (int64_t i0 = 0; i0 < window_counts[0]; ++i0) { for (int64_t i1 = 0; i1 < window_counts[1]; ++i1) { for (int64_t i2 = 0; i2 < window_counts[2]; ++i2) { for (int64_t i3 = 0; i3 < window_counts[3]; ++i3) { int64_t i0_base = i0 * stride[0] - pad_low[0]; int64_t i1_base = i1 * stride[1] - pad_low[1]; int64_t i2_base = i2 * stride[2] - pad_low[2]; int64_t i3_base = i3 * stride[3] - pad_low[3]; float val = init; for (int64_t i0_win = 0; i0_win < window[0]; ++i0_win) { for (int64_t i1_win = 0; i1_win < window[1]; ++i1_win) { for (int64_t i2_win = 0; i2_win < window[2]; ++i2_win) { for (int64_t i3_win = 0; i3_win < window[3]; ++i3_win) { if (i0_base + i0_win >= 0 && i1_base + i1_win >= 0 && i2_base + i2_win >= 0 && i3_base + i3_win >= 0 && i0_base + i0_win < operand.n1() && i1_base + i1_win < operand.n2() && i2_base + i2_win < operand.n3() && i3_base + i3_win < operand.n4()) { val = reduce_func( val, operand(i0_base + i0_win, i1_base + i1_win, i2_base + i2_win, i3_base + i3_win)); } } } } } (*result)(i0, i1, i2, i3) = val; } } } } return result; } std::unique_ptr<Array4D<float>> ReferenceUtil::ReduceWindow4DAdd( const Array4D<float>& operand, float init, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, Padding padding) { const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; }; return ReduceWindow4DGeneric(operand, init, add_reduce, window, stride, padding); } std::unique_ptr<Array4D<float>> ReferenceUtil::BatchNorm4D( const Array4D<float>& input, const Array4D<float>& mean, const Array4D<float>& var, const Array4D<float>& scale, const Array4D<float>& offset, float epsilon) { auto normalized = *MapArray4D(input, mean, [](float a, float b) { return a - b; }); normalized = *MapArray4D(normalized, var, [&](float a, float b) { return a / std::sqrt(b + epsilon); }); normalized = *MapArray4D(normalized, scale, [](float a, float b) { return a * b; }); return MapArray4D(normalized, offset, 
[](float a, float b) { return a + b; }); } std::unique_ptr<Array4D<float>> ReferenceUtil::SelectAndScatter4DGePlus(const Array4D<float>& operand, const Array4D<float>& source, float init, absl::Span<const int64_t> window, absl::Span<const int64_t> stride, bool same_padding) { Padding padding = same_padding ? Padding::kSame : Padding::kValid; auto result = std::make_unique<Array4D<float>>(operand.n1(), operand.n2(), operand.n3(), operand.n4()); std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(), operand.n4()}; auto padding_both = xla::MakePadding(dim_lengths, window, stride, padding); result->Fill(init); std::vector<int64_t> window_counts(window.size(), 0); std::vector<int64_t> pad_low(window.size(), 0); for (int64_t i = 0; i < window.size(); ++i) { window_counts[i] = WindowCount(dim_lengths[i], window[i], stride[i], padding); pad_low[i] = padding_both[i].first; } CHECK_EQ(window_counts[0], source.n1()); CHECK_EQ(window_counts[1], source.n2()); CHECK_EQ(window_counts[2], source.n3()); CHECK_EQ(window_counts[3], source.n4()); for (int64_t i0 = 0; i0 < window_counts[0]; ++i0) { for (int64_t i1 = 0; i1 < window_counts[1]; ++i1) { for (int64_t i2 = 0; i2 < window_counts[2]; ++i2) { for (int64_t i3 = 0; i3 < window_counts[3]; ++i3) { int64_t i0_base = i0 * stride[0] - pad_low[0]; int64_t i1_base = i1 * stride[1] - pad_low[1]; int64_t i2_base = i2 * stride[2] - pad_low[2]; int64_t i3_base = i3 * stride[3] - pad_low[3]; int64_t scatter_0 = (i0_base >= 0) ? i0_base : 0; int64_t scatter_1 = (i1_base >= 0) ? i1_base : 0; int64_t scatter_2 = (i2_base >= 0) ? i2_base : 0; int64_t scatter_3 = (i3_base >= 0) ? i3_base : 0; float val = operand(scatter_0, scatter_1, scatter_2, scatter_3); for (int64_t i0_win = 0; i0_win < window[0]; ++i0_win) { for (int64_t i1_win = 0; i1_win < window[1]; ++i1_win) { for (int64_t i2_win = 0; i2_win < window[2]; ++i2_win) { for (int64_t i3_win = 0; i3_win < window[3]; ++i3_win) { if (i0_base + i0_win >= 0 && i1_base + i1_win >= 0 && i2_base + i2_win >= 0 && i3_base + i3_win >= 0 && i0_base + i0_win < operand.n1() && i1_base + i1_win < operand.n2() && i2_base + i2_win < operand.n3() && i3_base + i3_win < operand.n4()) { float tmp = operand(i0_base + i0_win, i1_base + i1_win, i2_base + i2_win, i3_base + i3_win); if (tmp > val) { val = tmp; scatter_0 = i0_base + i0_win; scatter_1 = i1_base + i1_win; scatter_2 = i2_base + i2_win; scatter_3 = i3_base + i3_win; } } } } } } (*result)(scatter_0, scatter_1, scatter_2, scatter_3) += source(i0, i1, i2, i3); } } } } return result; } std::unique_ptr<Array4D<float>> ReferenceUtil::ConvArray4DGeneralDimensions( const Array4D<float>& lhs, const Array4D<float>& rhs, std::pair<int64_t, int64_t> kernel_stride, Padding padding, ConvolutionDimensionNumbers dimension_numbers) { return ConvArray4DGeneralDimensionsDilated(lhs, rhs, kernel_stride, padding, {1, 1}, {1, 1}, std::move(dimension_numbers)); } std::unique_ptr<Array4D<float>> ReferenceUtil::ConvArray4DGeneralDimensionsDilated( const Array4D<float>& lhs, const Array4D<float>& rhs, std::pair<int64_t, int64_t> kernel_stride, Padding padding, std::pair<int64_t, int64_t> lhs_dilation, std::pair<int64_t, int64_t> rhs_dilation, ConvolutionDimensionNumbers dnums) { HloComputation::Builder b("ConvArray4DGeneralDimensionDilated"); auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs); auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs); std::array<int64_t, 2> ordered_kernel_strides; std::array<int64_t, 2> ordered_input_dimensions; std::array<int64_t, 2> 
ordered_kernel_dimensions; if (dnums.kernel_spatial_dimensions(0) > dnums.kernel_spatial_dimensions(1)) { ordered_kernel_strides[0] = kernel_stride.second; ordered_kernel_strides[1] = kernel_stride.first; } else { ordered_kernel_strides[0] = kernel_stride.first; ordered_kernel_strides[1] = kernel_stride.second; } ordered_input_dimensions[0] = lhs_literal.shape().dimensions(dnums.input_spatial_dimensions(0)); ordered_input_dimensions[1] = lhs_literal.shape().dimensions(dnums.input_spatial_dimensions(1)); ordered_kernel_dimensions[0] = rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(0)); ordered_kernel_dimensions[1] = rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(1)); std::vector<std::pair<int64_t, int64_t>> paddings = MakePadding(ordered_input_dimensions, ordered_kernel_dimensions, ordered_kernel_strides, padding); CHECK_EQ(paddings.size(), 2); Window window; WindowDimension dim; dim.set_size( rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(0))); dim.set_stride(kernel_stride.first); dim.set_padding_low(paddings[0].first); dim.set_padding_high(paddings[0].second); dim.set_window_dilation(rhs_dilation.first); dim.set_base_dilation(lhs_dilation.first); *window.add_dimensions() = dim; WindowDimension dim2; dim2.set_size( rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(1))); dim2.set_stride(kernel_stride.second); dim2.set_padding_low(paddings[1].first); dim2.set_padding_high(paddings[1].second); dim2.set_window_dilation(rhs_dilation.second); dim2.set_base_dilation(lhs_dilation.second); *window.add_dimensions() = dim2; const Shape shape = ShapeInference::InferConvolveShape( lhs_literal.shape(), rhs_literal.shape(), 1, 1, window, dnums, std::nullopt) .value(); HloInstruction* lhs_instruction = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal))); HloInstruction* rhs_instruction = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal))); PrecisionConfig precision_config; precision_config.mutable_operand_precision()->Resize( 2, PrecisionConfig::DEFAULT); b.AddInstruction(HloInstruction::CreateConvolve( shape, lhs_instruction, rhs_instruction, 1, 1, window, dnums, precision_config)); HloModuleConfig config; HloModule module("ReferenceUtil", config); auto computation = module.AddEntryComputation(b.Build()); HloEvaluator evaluator; Literal result_literal = evaluator.Evaluate(*computation, {}).value(); CHECK_EQ(result_literal.shape().rank(), 4); auto result = std::make_unique<Array4D<float>>(result_literal.shape().dimensions(0), result_literal.shape().dimensions(1), result_literal.shape().dimensions(2), result_literal.shape().dimensions(3)); result->Each([&](absl::Span<const int64_t> indices, float* value) { *value = result_literal.Get<float>(indices); }); return result; } std::unique_ptr<std::vector<float>> ReferenceUtil::ReduceToColArray2D( const Array2D<float>& matrix, float init, absl::FunctionRef<float(float, float)> reduce_function) { int64_t rows = matrix.height(); int64_t cols = matrix.width(); auto result = std::make_unique<std::vector<float>>(); for (int64_t i = 0; i < rows; ++i) { float acc = init; for (int64_t j = 0; j < cols; ++j) { acc = reduce_function(acc, matrix(i, j)); } result->push_back(acc); } return result; } std::unique_ptr<std::vector<float>> ReferenceUtil::ReduceToRowArray2D( const Array2D<float>& matrix, float init, absl::FunctionRef<float(float, float)> reduce_function) { int64_t rows = matrix.height(); int64_t cols = matrix.width(); auto result = 
std::make_unique<std::vector<float>>(); for (int64_t i = 0; i < cols; ++i) { float acc = init; for (int64_t j = 0; j < rows; ++j) { acc = reduce_function(acc, matrix(j, i)); } result->push_back(acc); } return result; } std::vector<float> ReferenceUtil::Reduce4DTo1D( const Array4D<float>& array, float init, absl::Span<const int64_t> dims, absl::FunctionRef<float(float, float)> reduce_function) { std::vector<float> result; CHECK_EQ(dims.size(), 3); const absl::flat_hash_set<int64_t> dim_set(dims.begin(), dims.end()); CHECK_EQ(dim_set.size(), 3); for (int64_t a0 = 0; a0 == 0 || (!dim_set.contains(0) && a0 < array.n1()); ++a0) { for (int64_t a1 = 0; a1 == 0 || (!dim_set.contains(1) && a1 < array.n2()); ++a1) { for (int64_t a2 = 0; a2 == 0 || (!dim_set.contains(2) && a2 < array.n3()); ++a2) { for (int64_t a3 = 0; a3 == 0 || (!dim_set.contains(3) && a3 < array.n4()); ++a3) { float accumulator = init; for (int64_t i0 = 0; i0 == 0 || (dim_set.contains(0) && i0 < array.n1()); ++i0) { for (int64_t i1 = 0; i1 == 0 || (dim_set.contains(1) && i1 < array.n2()); ++i1) { for (int64_t i2 = 0; i2 == 0 || (dim_set.contains(2) && i2 < array.n3()); ++i2) { for (int64_t i3 = 0; i3 == 0 || (dim_set.contains(3) && i3 < array.n4()); ++i3) { if (array.n1() > 0 && array.n2() > 0 && array.n3() > 0 && array.n4() > 0) { accumulator = reduce_function( accumulator, array(a0 + i0, a1 + i1, a2 + i2, a3 + i3)); } } } } } result.push_back(accumulator); } } } } return result; } std::unique_ptr<Array4D<float>> ReferenceUtil::Broadcast1DTo4D( const std::vector<float>& array, const std::vector<int64_t>& bounds, int64_t broadcast_from_dim) { auto result = std::make_unique<Array4D<float>>(bounds[0], bounds[1], bounds[2], bounds[3]); for (int64_t i = 0; i < result->n1(); ++i) { for (int64_t j = 0; j < result->n2(); ++j) { for (int64_t k = 0; k < result->n3(); ++k) { for (int64_t l = 0; l < result->n4(); ++l) { switch (broadcast_from_dim) { case 0: (*result)(i, j, k, l) = array[i]; break; case 1: (*result)(i, j, k, l) = array[j]; break; case 2: (*result)(i, j, k, l) = array[k]; break; case 3: (*result)(i, j, k, l) = array[l]; break; default: break; } } } } } return result; } std::unique_ptr<Array2D<float>> ReferenceUtil::Reduce3DTo2D( const Array3D<float>& array, float init, absl::Span<const int64_t> dims, absl::FunctionRef<float(float, float)> reduce_function) { CHECK_EQ(dims.size(), 1); int64_t rows = dims[0] == 0 ? array.n2() : array.n1(); int64_t cols = dims[0] == 2 ? array.n2() : array.n3(); auto result = std::make_unique<Array2D<float>>(rows, cols); result->Fill(init); for (int i0 = 0; i0 < array.n1(); ++i0) { for (int i1 = 0; i1 < array.n2(); ++i1) { for (int i2 = 0; i2 < array.n3(); ++i2) { int64_t row = dims[0] == 0 ? i1 : i0; int64_t col = dims[0] == 2 ? 
i1 : i2; (*result)(row, col) = reduce_function((*result)(row, col), array(i0, i1, i2)); } } } return result; } std::unique_ptr<Array2D<float>> ReferenceUtil::MapArray2D( const Array2D<float>& matrix, absl::FunctionRef<float(float)> map_function) { int64_t rows = matrix.height(); int64_t cols = matrix.width(); auto result = std::make_unique<Array2D<float>>(rows, cols); for (int64_t i = 0; i < rows; ++i) { for (int64_t j = 0; j < cols; ++j) { (*result)(i, j) = map_function(matrix(i, j)); } } return result; } std::unique_ptr<Array2D<float>> ReferenceUtil::MapArray2D( const Array2D<float>& lhs, const Array2D<float>& rhs, absl::FunctionRef<float(float, float)> map_function) { CHECK_EQ(lhs.height(), rhs.height()); CHECK_EQ(lhs.width(), rhs.width()); int64_t rows = lhs.height(); int64_t cols = rhs.width(); auto result = std::make_unique<Array2D<float>>(rows, cols); for (int64_t i = 0; i < rows; ++i) { for (int64_t j = 0; j < cols; ++j) { (*result)(i, j) = map_function(lhs(i, j), rhs(i, j)); } } return result; } std::unique_ptr<Array3D<float>> ReferenceUtil::MapArray3D( const Array3D<float>& array, absl::FunctionRef<float(float)> map_function) { int64_t n1 = array.n1(); int64_t n2 = array.n2(); int64_t n3 = array.n3(); auto result = std::make_unique<Array3D<float>>(n1, n2, n3); for (int64_t i = 0; i < n1; ++i) { for (int64_t j = 0; j < n2; ++j) { for (int64_t k = 0; k < n3; ++k) { (*result)(i, j, k) = map_function(array(i, j, k)); } } } return result; } std::unique_ptr<Array3D<float>> ReferenceUtil::MapArray3D( const Array3D<float>& lhs, const Array3D<float>& rhs, absl::FunctionRef<float(float, float)> map_function) { CHECK_EQ(lhs.n1(), rhs.n1()); CHECK_EQ(lhs.n2(), rhs.n2()); CHECK_EQ(lhs.n3(), rhs.n3()); int64_t n1 = lhs.n1(); int64_t n2 = rhs.n2(); int64_t n3 = rhs.n3(); auto result = std::make_unique<Array3D<float>>(n1, n2, n3); for (int64_t i = 0; i < n1; ++i) { for (int64_t j = 0; j < n2; ++j) { for (int64_t k = 0; k < n3; ++k) { (*result)(i, j, k) = map_function(lhs(i, j, k), rhs(i, j, k)); } } } return result; } std::unique_ptr<Array2D<float>> ReferenceUtil::MapWithIndexArray2D( const Array2D<float>& matrix, absl::FunctionRef<float(float, int64_t, int64_t)> map_function) { int64_t rows = matrix.height(); int64_t cols = matrix.width(); auto result = std::make_unique<Array2D<float>>(rows, cols); for (int64_t i = 0; i < rows; ++i) { for (int64_t j = 0; j < cols; ++j) { (*result)(i, j) = map_function(matrix(i, j), i, j); } } return result; } }
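// --- Editor's usage sketch (not part of the original file; assumes the xla
// reference_util library and its dependencies are linked) ---
#include <iostream>

#include "xla/array2d.h"
#include "xla/reference_util.h"

int main() {
  xla::Array2D<float> m({{1.f, 2.f}, {3.f, 4.f}});
  auto doubled =
      xla::ReferenceUtil::MapArray2D(m, [](float v) { return 2 * v; });
  // ReduceToColArray2D folds each row into one value (here, a row sum).
  auto row_sums = xla::ReferenceUtil::ReduceToColArray2D(
      m, 0.0f, [](float a, float b) { return a + b; });
  std::cout << (*doubled)(1, 1) << "\n";                         // 8
  std::cout << (*row_sums)[0] << " " << (*row_sums)[1] << "\n";  // 3 7
  return 0;
}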
#include "xla/reference_util.h" #include <cmath> #include <memory> #include "xla/array2d.h" #include "xla/array3d.h" #include "xla/array4d.h" #include "xla/client/padding.h" #include "xla/literal.h" #include "xla/test.h" #include "xla/tests/literal_test_util.h" #include "xla/xla_data.pb.h" namespace xla { namespace { class ReferenceUtilTest : public ::testing::Test { protected: ReferenceUtilTest() { matrix_ = std::make_unique<Array2D<float>>(rows_, cols_); for (int64_t i = 0; i < rows_; ++i) { for (int64_t j = 0; j < cols_; ++j) { (*matrix_)(i, j) = i * cols_ + j + 1; } } } const int64_t rows_ = 2; const int64_t cols_ = 3; std::unique_ptr<Array2D<float>> matrix_; }; TEST_F(ReferenceUtilTest, TransposeArray2D) { auto result = ReferenceUtil::TransposeArray2D(*matrix_); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 4.f}, {2.f, 5.f}, {3.f, 6.f}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, MatmulArray2D) { Array2D<float> rhs({ {7.f, 8.f}, {9.f, 10.f}, {11.f, 12.f}, }); auto result = ReferenceUtil::MatmulArray2D(*matrix_, rhs); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{58.f, 64.f}, {139.f, 154.f}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ReduceToColArray2D) { auto add = [](float lhs, float rhs) { return lhs + rhs; }; auto result = ReferenceUtil::ReduceToColArray2D(*matrix_, 0.0f, add); auto actual_literal = LiteralUtil::CreateR1<float>(*result); LiteralTestUtil::ExpectR1Near<float>({6.f, 15.f}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ReduceToRowArray2D) { auto add = [](float lhs, float rhs) { return lhs + rhs; }; auto result = ReferenceUtil::ReduceToRowArray2D(*matrix_, 0.0f, add); auto actual_literal = LiteralUtil::CreateR1<float>(*result); LiteralTestUtil::ExpectR1Near<float>({5.f, 7.f, 9.f}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, Array2DF32ToF64Test) { auto result = ReferenceUtil::Array2DF32ToF64(*matrix_); ASSERT_EQ(result->height(), matrix_->height()); ASSERT_EQ(result->width(), matrix_->width()); for (int64_t rowno = 0; rowno < matrix_->height(); ++rowno) { for (int64_t colno = 0; colno < matrix_->width(); ++colno) { EXPECT_EQ(static_cast<double>((*matrix_)(rowno, colno)), (*result)(rowno, colno)); } } } TEST_F(ReferenceUtilTest, Reduce4Dto1DZeroSizedArray) { auto result = LiteralUtil::CreateR1<float>(ReferenceUtil::Reduce4DTo1D( Array4D<float>(1, 0, 1, 1), 0, {0, 1, 2}, [](float a, float b) { return a + b; })); LiteralTestUtil::ExpectR1Equal<float>({0}, result); } TEST_F(ReferenceUtilTest, MapArray2D) { auto identity = [](float value) { return std::log(std::exp(value)); }; auto result = ReferenceUtil::MapArray2D(*matrix_, identity); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2NearArray2D(*matrix_, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, MapArray3D) { auto identity = [](float value) { return std::log(std::exp(value)); }; Array3D<float> input(2, 3, 4); input.FillIota(0); auto result = ReferenceUtil::MapArray3D(input, identity); auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result); LiteralTestUtil::ExpectR3NearArray3D(input, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, MapWithIndexArray2D) { auto add_index = [](float value, int64_t row, int64_t col) { return value + row + col; }; auto result = ReferenceUtil::MapWithIndexArray2D(*matrix_, add_index); auto actual_literal = 
LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f, 5.f}, {5.f, 7.f, 9.f}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, MapArray4D) { auto input = std::make_unique<Array4D<float>>(2, 3, 4, 5); input->FillWithMultiples(1.0f); auto multiply_by_two = [](float value) { return 2 * value; }; auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); Array4D<float> expected(2, 3, 4, 5); expected.FillWithMultiples(2.0f); LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, MapWithIndexArray4D) { auto input = std::make_unique<Array4D<float>>(2, 3, 4, 5); input->FillWithMultiples(1.0f); auto subtract_index = [](float value, int64_t plane, int64_t depth, int64_t height, int64_t width) { return value - (3 * 4 * 5 * plane + 4 * 5 * depth + 5 * height + width); }; auto result = ReferenceUtil::MapWithIndexArray4D(*input, subtract_index); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); Array4D<float> expected(2, 3, 4, 5); expected.Fill(0.0f); LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceArray2D) { auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 2}}, {{1, 1}}); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 2.f}, {4.f, 5.f}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceStridedArray2D) { auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 3}}, {{1, 2}}); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f}, {4.f, 6.f}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceArray3D) { Array3D<float> input(2, 3, 4); input.FillIota(0); auto result = ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 2, 2}}, {{1, 1, 1}}); auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result); LiteralTestUtil::ExpectR3Near<float>( {{{0.f, 1.f}, {4.f, 5.f}}, {{12.f, 13.f}, {16.f, 17.f}}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceStridedArray3D) { Array3D<float> input(2, 3, 4); input.FillIota(0); auto result = ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 3, 4}}, {{1, 2, 2}}); auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result); LiteralTestUtil::ExpectR3Near<float>( {{{0.f, 2.f}, {8.f, 10.f}}, {{12.f, 14.f}, {20.f, 22.f}}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceArray4D) { Array4D<float> input(2, 3, 4, 5); input.FillIota(0); auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 2, 2, 2}}, {{1, 1, 1, 1}}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); LiteralTestUtil::ExpectR4Near<float>( {{{{60.f, 61.f}, {65.f, 66.f}}, {{80.f, 81.f}, {85.f, 86.f}}}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, SliceStridedArray4D) { Array4D<float> input(2, 3, 4, 5); input.FillIota(0); auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 3, 4, 5}}, {{1, 2, 2, 2}}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); LiteralTestUtil::ExpectR4Near<float>( {{{{60.f, 62.f, 64.f}, {70.f, 72.f, 74.f}}, {{100.f, 102.f, 104.f}, {110.f, 112.f, 114.f}}}}, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvArray3DWithSamePadding) { Array3D<float> input = {{{1, 2, 3, 4}}}; Array3D<float> weights = {{{5, 6}}}; 
std::unique_ptr<Array3D<float>> actual = ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kSame); Array3D<float> expected = {{{17, 28, 39, 20}}}; auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual); LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvArray3DWithValidPadding) { Array3D<float> input = {{{1, 2, 3, 4}}}; Array3D<float> weights = {{{5, 6}}}; std::unique_ptr<Array3D<float>> actual = ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kValid); Array3D<float> expected = {{{17, 28, 39}}}; auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual); LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvWithSamePadding) { Array4D<float> input(1, 1, 4, 4); input.FillWithYX(Array2D<float>({ {1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10, 11, 12}, {13, 14, 15, 16}, })); Array4D<float> weights(1, 1, 2, 2); weights.FillWithYX(Array2D<float>({ {5, 6}, {7, 8}, })); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kSame); Array4D<float> expected(1, 1, 4, 4); expected.FillWithYX(Array2D<float>({ {100, 126, 152, 76}, {204, 230, 256, 124}, {308, 334, 360, 172}, {149, 160, 171, 80}, })); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvWithValidPadding) { Array4D<float> input(1, 1, 4, 4); input.FillWithYX(Array2D<float>({ {1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10, 11, 12}, {13, 14, 15, 16}, })); Array4D<float> weights(1, 1, 2, 2); weights.FillWithYX(Array2D<float>({ {5, 6}, {7, 8}, })); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kValid); Array4D<float> expected(1, 1, 3, 3); expected.FillWithYX(Array2D<float>({ {1*5+2*6+5*7+6*8, 126, 152}, {204, 230, 256}, {308, 334, 11*5+12*6+15*7+16*8}, })); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) { Array4D<float> input({ {{{1, 2, 3, 4}}, {{5, 6, 7, 8}}, {{9, 10, 11, 12}}}, {{{13, 14, 15, 16}}, {{17, 18, 19, 20}}, {{21, 22, 23, 24}}} }); Array4D<float> weight({{ {{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}, {{13, 14, 15}, {16, 17, 18}} }}); ConvolutionDimensionNumbers dimension_numbers; dimension_numbers.set_input_batch_dimension(2); dimension_numbers.set_input_feature_dimension(0); dimension_numbers.set_output_batch_dimension(2); dimension_numbers.set_output_feature_dimension(0); dimension_numbers.add_input_spatial_dimensions(1); dimension_numbers.add_output_spatial_dimensions(1); dimension_numbers.add_input_spatial_dimensions(3); dimension_numbers.add_output_spatial_dimensions(3); dimension_numbers.set_kernel_output_feature_dimension(0); dimension_numbers.set_kernel_input_feature_dimension(2); dimension_numbers.add_kernel_spatial_dimensions(1); dimension_numbers.add_kernel_spatial_dimensions(3); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4DGeneralDimensions( input, weight, {1, 1}, Padding::kSame, dimension_numbers); Array4D<float> expected({{ {{1110, 1688, 1838, 1226}}, {{1683, 2514, 2685, 1761}}, {{878, 1280, 1358, 866}} }}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, 
ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) { Array4D<float> input({ {{{1, 2, 3, 4}}, {{5, 6, 7, 8}}, {{9, 10, 11, 12}}}, {{{13, 14, 15, 16}}, {{17, 18, 19, 20}}, {{21, 22, 23, 24}}} }); Array4D<float> weight({{ {{1, 7, 13}, {4, 10, 16}}, {{2, 8, 14}, {5, 11, 17}}, {{3, 9, 15}, {6, 12, 18}} }}); ConvolutionDimensionNumbers dimension_numbers; dimension_numbers.set_input_batch_dimension(2); dimension_numbers.set_input_feature_dimension(0); dimension_numbers.set_output_batch_dimension(2); dimension_numbers.set_output_feature_dimension(0); dimension_numbers.add_input_spatial_dimensions(1); dimension_numbers.add_output_spatial_dimensions(1); dimension_numbers.add_input_spatial_dimensions(3); dimension_numbers.add_output_spatial_dimensions(3); dimension_numbers.set_kernel_output_feature_dimension(0); dimension_numbers.set_kernel_input_feature_dimension(2); dimension_numbers.add_kernel_spatial_dimensions(3); dimension_numbers.add_kernel_spatial_dimensions(1); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4DGeneralDimensions( input, weight, {1, 1}, Padding::kValid, dimension_numbers); Array4D<float> expected({{{{2514, 2685}}}}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ApplyElementwise2D) { Array2D<float> a({{1, 2}, {3, 4}}); Array2D<float> b({{10, 20}, {30, 40}}); Array2D<float> c({{100, 200}, {300, 400}}); auto actual = ReferenceUtil::ApplyElementwise2D( [](float x, float y, float z) { return 100 * x + 10 * y + z; }, a, b, c); auto actual_literal = LiteralUtil::CreateR2FromArray2D(*actual); LiteralTestUtil::ExpectR2Near({{300.f, 600.f}, {900.f, 1200.f}}, actual_literal, ErrorSpec(0.0001)); } } }
std::unique_ptr<Array4D<float>> ReferenceUtil::ConvArray4DGeneralDimensions( const Array4D<float>& lhs, const Array4D<float>& rhs, std::pair<int64_t, int64_t> kernel_stride, Padding padding, ConvolutionDimensionNumbers dimension_numbers) { return ConvArray4DGeneralDimensionsDilated(lhs, rhs, kernel_stride, padding, {1, 1}, {1, 1}, std::move(dimension_numbers)); }
TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) { Array4D<float> input({ {{{1, 2, 3, 4}}, {{5, 6, 7, 8}}, {{9, 10, 11, 12}}}, {{{13, 14, 15, 16}}, {{17, 18, 19, 20}}, {{21, 22, 23, 24}}} }); Array4D<float> weight({{ {{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}, {{13, 14, 15}, {16, 17, 18}} }}); ConvolutionDimensionNumbers dimension_numbers; dimension_numbers.set_input_batch_dimension(2); dimension_numbers.set_input_feature_dimension(0); dimension_numbers.set_output_batch_dimension(2); dimension_numbers.set_output_feature_dimension(0); dimension_numbers.add_input_spatial_dimensions(1); dimension_numbers.add_output_spatial_dimensions(1); dimension_numbers.add_input_spatial_dimensions(3); dimension_numbers.add_output_spatial_dimensions(3); dimension_numbers.set_kernel_output_feature_dimension(0); dimension_numbers.set_kernel_input_feature_dimension(2); dimension_numbers.add_kernel_spatial_dimensions(1); dimension_numbers.add_kernel_spatial_dimensions(3); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4DGeneralDimensions( input, weight, {1, 1}, Padding::kSame, dimension_numbers); Array4D<float> expected({{ {{1110, 1688, 1838, 1226}}, {{1683, 2514, 2685, 1761}}, {{878, 1280, 1358, 866}} }}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, ErrorSpec(0.0001)); } TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) { Array4D<float> input({ {{{1, 2, 3, 4}}, {{5, 6, 7, 8}}, {{9, 10, 11, 12}}}, {{{13, 14, 15, 16}}, {{17, 18, 19, 20}}, {{21, 22, 23, 24}}} }); Array4D<float> weight({{ {{1, 7, 13}, {4, 10, 16}}, {{2, 8, 14}, {5, 11, 17}}, {{3, 9, 15}, {6, 12, 18}} }}); ConvolutionDimensionNumbers dimension_numbers; dimension_numbers.set_input_batch_dimension(2); dimension_numbers.set_input_feature_dimension(0); dimension_numbers.set_output_batch_dimension(2); dimension_numbers.set_output_feature_dimension(0); dimension_numbers.add_input_spatial_dimensions(1); dimension_numbers.add_output_spatial_dimensions(1); dimension_numbers.add_input_spatial_dimensions(3); dimension_numbers.add_output_spatial_dimensions(3); dimension_numbers.set_kernel_output_feature_dimension(0); dimension_numbers.set_kernel_input_feature_dimension(2); dimension_numbers.add_kernel_spatial_dimensions(3); dimension_numbers.add_kernel_spatial_dimensions(1); std::unique_ptr<Array4D<float>> actual = ReferenceUtil::ConvArray4DGeneralDimensions( input, weight, {1, 1}, Padding::kValid, dimension_numbers); Array4D<float> expected({{{{2514, 2685}}}}); auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal, ErrorSpec(0.0001)); }
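Editor's note: the kSame expectation {17, 28, 39, 20} in ConvArray3DWithSamePadding can be reproduced by hand. With stride 1 and a width-2 kernel, same padding here pads a single zero on the high side only. A minimal standalone sketch (plain C++, independent of XLA; the high-side-only padding is an assumption inferred from the expected output, not a statement about ReferenceUtil's internals):

#include <cstddef>
#include <iostream>
#include <vector>

// Cross-correlate `in` with `kernel`, zero-padding on the high side only;
// for a width-2 kernel this reproduces the kSame expectation above.
std::vector<float> Conv1DSame(const std::vector<float>& in,
                              const std::vector<float>& kernel) {
  std::vector<float> out(in.size(), 0.0f);
  for (std::size_t i = 0; i < in.size(); ++i)
    for (std::size_t k = 0; k < kernel.size(); ++k)
      if (i + k < in.size()) out[i] += in[i + k] * kernel[k];
  return out;
}

int main() {
  for (float v : Conv1DSame({1, 2, 3, 4}, {5, 6})) std::cout << v << ' ';
  std::cout << '\n';  // prints: 17 28 39 20
}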
#include "extensions/protobuf/internal/duration.h" #include <cstdint> #include "google/protobuf/duration.pb.h" #include "absl/base/optimization.h" #include "absl/log/absl_check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/time/time.h" #include "extensions/protobuf/internal/duration_lite.h" #include "google/protobuf/descriptor.h" #include "google/protobuf/message.h" namespace cel::extensions::protobuf_internal { absl::StatusOr<absl::Duration> UnwrapDynamicDurationProto( const google::protobuf::Message& message) { ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration"); const auto* desc = message.GetDescriptor(); if (ABSL_PREDICT_FALSE(desc == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing descriptor")); } if (desc == google::protobuf::Duration::descriptor()) { return UnwrapGeneratedDurationProto( google::protobuf::DownCastToGenerated<google::protobuf::Duration>(message)); } const auto* reflect = message.GetReflection(); if (ABSL_PREDICT_FALSE(reflect == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing reflection")); } const auto* seconds_field = desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber); if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " missing seconds field descriptor")); } if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT64)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected seconds field type: ", seconds_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(seconds_field->is_map() || seconds_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", seconds_field->name(), " field cardinality: REPEATED")); } const auto* nanos_field = desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber); if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing nanos field descriptor")); } if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT32)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected nanos field type: ", nanos_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", nanos_field->name(), " field cardinality: REPEATED")); } return absl::Seconds(reflect->GetInt64(message, seconds_field)) + absl::Nanoseconds(reflect->GetInt32(message, nanos_field)); } absl::Status WrapDynamicDurationProto(absl::Duration value, google::protobuf::Message& message) { ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration"); const auto* desc = message.GetDescriptor(); if (ABSL_PREDICT_FALSE(desc == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing descriptor")); } if (ABSL_PREDICT_TRUE(desc == google::protobuf::Duration::descriptor())) { return WrapGeneratedDurationProto( value, google::protobuf::DownCastToGenerated<google::protobuf::Duration>(message)); } const auto* reflect = message.GetReflection(); if (ABSL_PREDICT_FALSE(reflect == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing reflection")); } const auto* seconds_field = 
desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber); if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " missing seconds field descriptor")); } if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT64)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected seconds field type: ", seconds_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(seconds_field->is_map() || seconds_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", seconds_field->name(), " field cardinality: REPEATED")); } const auto* nanos_field = desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber); if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing nanos field descriptor")); } if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT32)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected nanos field type: ", nanos_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", nanos_field->name(), " field cardinality: REPEATED")); } reflect->SetInt64(&message, seconds_field, absl::IDivDuration(value, absl::Seconds(1), &value)); reflect->SetInt32(&message, nanos_field, static_cast<int32_t>(absl::IDivDuration( value, absl::Nanoseconds(1), &value))); return absl::OkStatus(); } }
#include "extensions/protobuf/internal/duration.h" #include <memory> #include "google/protobuf/duration.pb.h" #include "google/protobuf/descriptor.pb.h" #include "absl/memory/memory.h" #include "absl/time/time.h" #include "extensions/protobuf/internal/duration_lite.h" #include "internal/testing.h" #include "google/protobuf/descriptor.h" #include "google/protobuf/descriptor_database.h" #include "google/protobuf/dynamic_message.h" namespace cel::extensions::protobuf_internal { namespace { using testing::Eq; using cel::internal::IsOkAndHolds; TEST(Duration, GeneratedFromProto) { EXPECT_THAT(UnwrapGeneratedDurationProto(google::protobuf::Duration()), IsOkAndHolds(Eq(absl::ZeroDuration()))); } TEST(Duration, CustomFromProto) { google::protobuf::SimpleDescriptorDatabase database; { google::protobuf::FileDescriptorProto fd; google::protobuf::Duration::descriptor()->file()->CopyTo(&fd); ASSERT_TRUE(database.Add(fd)); } google::protobuf::DescriptorPool pool(&database); pool.AllowUnknownDependencies(); google::protobuf::DynamicMessageFactory factory(&pool); factory.SetDelegateToGeneratedFactory(false); EXPECT_THAT(UnwrapDynamicDurationProto(*factory.GetPrototype( pool.FindMessageTypeByName("google.protobuf.Duration"))), IsOkAndHolds(Eq(absl::ZeroDuration()))); } TEST(Duration, GeneratedToProto) { google::protobuf::Duration proto; ASSERT_OK(WrapGeneratedDurationProto(absl::Seconds(1) + absl::Nanoseconds(2), proto)); EXPECT_EQ(proto.seconds(), 1); EXPECT_EQ(proto.nanos(), 2); } TEST(Duration, CustomToProto) { google::protobuf::SimpleDescriptorDatabase database; { google::protobuf::FileDescriptorProto fd; google::protobuf::Duration::descriptor()->file()->CopyTo(&fd); ASSERT_TRUE(database.Add(fd)); } google::protobuf::DescriptorPool pool(&database); pool.AllowUnknownDependencies(); google::protobuf::DynamicMessageFactory factory(&pool); factory.SetDelegateToGeneratedFactory(false); std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique( factory .GetPrototype(pool.FindMessageTypeByName("google.protobuf.Duration")) ->New()); const auto* descriptor = proto->GetDescriptor(); const auto* reflection = proto->GetReflection(); const auto* seconds_field = descriptor->FindFieldByName("seconds"); ASSERT_NE(seconds_field, nullptr); const auto* nanos_field = descriptor->FindFieldByName("nanos"); ASSERT_NE(nanos_field, nullptr); ASSERT_OK(WrapDynamicDurationProto(absl::Seconds(1) + absl::Nanoseconds(2), *proto)); EXPECT_EQ(reflection->GetInt64(*proto, seconds_field), 1); EXPECT_EQ(reflection->GetInt32(*proto, nanos_field), 2); } } }
absl::Status WrapDynamicDurationProto(absl::Duration value, google::protobuf::Message& message) { ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration"); const auto* desc = message.GetDescriptor(); if (ABSL_PREDICT_FALSE(desc == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing descriptor")); } if (ABSL_PREDICT_TRUE(desc == google::protobuf::Duration::descriptor())) { return WrapGeneratedDurationProto( value, google::protobuf::DownCastToGenerated<google::protobuf::Duration>(message)); } const auto* reflect = message.GetReflection(); if (ABSL_PREDICT_FALSE(reflect == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing reflection")); } const auto* seconds_field = desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber); if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " missing seconds field descriptor")); } if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT64)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected seconds field type: ", seconds_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(seconds_field->is_map() || seconds_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", seconds_field->name(), " field cardinality: REPEATED")); } const auto* nanos_field = desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber); if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " missing nanos field descriptor")); } if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_INT32)) { return absl::InternalError(absl::StrCat( message.GetTypeName(), " has unexpected nanos field type: ", nanos_field->cpp_type_name())); } if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) { return absl::InternalError( absl::StrCat(message.GetTypeName(), " has unexpected ", nanos_field->name(), " field cardinality: REPEATED")); } reflect->SetInt64(&message, seconds_field, absl::IDivDuration(value, absl::Seconds(1), &value)); reflect->SetInt32(&message, nanos_field, static_cast<int32_t>(absl::IDivDuration( value, absl::Nanoseconds(1), &value))); return absl::OkStatus(); }
TEST(Duration, CustomToProto) { google::protobuf::SimpleDescriptorDatabase database; { google::protobuf::FileDescriptorProto fd; google::protobuf::Duration::descriptor()->file()->CopyTo(&fd); ASSERT_TRUE(database.Add(fd)); } google::protobuf::DescriptorPool pool(&database); pool.AllowUnknownDependencies(); google::protobuf::DynamicMessageFactory factory(&pool); factory.SetDelegateToGeneratedFactory(false); std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique( factory .GetPrototype(pool.FindMessageTypeByName("google.protobuf.Duration")) ->New()); const auto* descriptor = proto->GetDescriptor(); const auto* reflection = proto->GetReflection(); const auto* seconds_field = descriptor->FindFieldByName("seconds"); ASSERT_NE(seconds_field, nullptr); const auto* nanos_field = descriptor->FindFieldByName("nanos"); ASSERT_NE(nanos_field, nullptr); ASSERT_OK(WrapDynamicDurationProto(absl::Seconds(1) + absl::Nanoseconds(2), *proto)); EXPECT_EQ(reflection->GetInt64(*proto, seconds_field), 1); EXPECT_EQ(reflection->GetInt32(*proto, nanos_field), 2); }
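Editor's note: the two SetInt64/SetInt32 calls at the end of WrapDynamicDurationProto rely on absl::IDivDuration returning the whole quotient while storing the remainder back through its last argument. A minimal sketch of that splitting step in isolation (absl only, no CEL dependency):

#include <cstdint>
#include <iostream>
#include "absl/time/time.h"

int main() {
  absl::Duration value = absl::Seconds(1) + absl::Nanoseconds(2);
  // First division extracts whole seconds; `value` keeps the remainder.
  int64_t seconds = absl::IDivDuration(value, absl::Seconds(1), &value);
  // Second division converts the sub-second remainder to nanoseconds.
  int32_t nanos = static_cast<int32_t>(
      absl::IDivDuration(value, absl::Nanoseconds(1), &value));
  std::cout << seconds << "s " << nanos << "ns\n";  // 1s 2ns
}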
#include "tensorstore/progress.h" #include <ostream> namespace tensorstore { bool operator==(const ReadProgress& a, const ReadProgress& b) { return a.total_elements == b.total_elements && a.copied_elements == b.copied_elements; } bool operator!=(const ReadProgress& a, const ReadProgress& b) { return !(a == b); } std::ostream& operator<<(std::ostream& os, const ReadProgress& a) { return os << "{ total_elements=" << a.total_elements << ", copied_elements=" << a.copied_elements << " }"; } bool operator==(const WriteProgress& a, const WriteProgress& b) { return a.total_elements == b.total_elements && a.copied_elements == b.copied_elements && a.committed_elements == b.committed_elements; } bool operator!=(const WriteProgress& a, const WriteProgress& b) { return !(a == b); } std::ostream& operator<<(std::ostream& os, const WriteProgress& a) { return os << "{ total_elements=" << a.total_elements << ", copied_elements=" << a.copied_elements << ", committed_elements=" << a.committed_elements << " }"; } bool operator==(const CopyProgress& a, const CopyProgress& b) { return a.total_elements == b.total_elements && a.read_elements == b.read_elements && a.copied_elements == b.copied_elements && a.committed_elements == b.committed_elements; } bool operator!=(const CopyProgress& a, const CopyProgress& b) { return !(a == b); } std::ostream& operator<<(std::ostream& os, const CopyProgress& a) { return os << "{ total_elements=" << a.total_elements << ", read_elements=" << a.read_elements << ", copied_elements=" << a.copied_elements << ", committed_elements=" << a.committed_elements << " }"; } }
#include "tensorstore/progress.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/util/str_cat.h" namespace { using ::tensorstore::CopyProgress; using ::tensorstore::ReadProgress; using ::tensorstore::WriteProgress; TEST(ReadProgressTest, Comparison) { ReadProgress a{1, 1}; ReadProgress b{2, 2}; ReadProgress c{2, 1}; EXPECT_EQ(a, a); EXPECT_EQ(b, b); EXPECT_EQ(c, c); EXPECT_NE(a, b); EXPECT_NE(a, c); EXPECT_NE(b, c); } TEST(ReadProgressTest, Ostream) { EXPECT_EQ("{ total_elements=2, copied_elements=1 }", tensorstore::StrCat(ReadProgress{2, 1})); } TEST(WriteProgressTest, Comparison) { WriteProgress a{1, 1, 1}; WriteProgress b{2, 2, 2}; WriteProgress c{2, 1, 1}; WriteProgress d{2, 1, 2}; EXPECT_EQ(a, a); EXPECT_EQ(b, b); EXPECT_EQ(c, c); EXPECT_EQ(d, d); EXPECT_NE(a, b); EXPECT_NE(a, c); EXPECT_NE(a, d); EXPECT_NE(b, d); EXPECT_NE(b, c); EXPECT_NE(c, d); } TEST(WriteProgressTest, Ostream) { EXPECT_EQ("{ total_elements=3, copied_elements=2, committed_elements=1 }", tensorstore::StrCat(WriteProgress{3, 2, 1})); } TEST(CopyProgressTest, Comparison) { CopyProgress a{1, 1, 1, 1}; CopyProgress b{2, 1, 1, 1}; CopyProgress c{1, 2, 1, 1}; CopyProgress d{1, 1, 2, 1}; CopyProgress e{1, 1, 1, 2}; EXPECT_EQ(a, a); EXPECT_EQ(b, b); EXPECT_EQ(c, c); EXPECT_EQ(d, d); EXPECT_EQ(e, e); EXPECT_NE(a, b); EXPECT_NE(a, c); EXPECT_NE(a, d); EXPECT_NE(a, e); } TEST(CopyProgressTest, Ostream) { EXPECT_EQ( "{ total_elements=4, read_elements=3, copied_elements=2, " "committed_elements=1 }", tensorstore::StrCat(CopyProgress{4, 3, 2, 1})); } }
bool operator==(const WriteProgress& a, const WriteProgress& b) { return a.total_elements == b.total_elements && a.copied_elements == b.copied_elements && a.committed_elements == b.committed_elements; }
TEST(WriteProgressTest, Comparison) { WriteProgress a{1, 1, 1}; WriteProgress b{2, 2, 2}; WriteProgress c{2, 1, 1}; WriteProgress d{2, 1, 2}; EXPECT_EQ(a, a); EXPECT_EQ(b, b); EXPECT_EQ(c, c); EXPECT_EQ(d, d); EXPECT_NE(a, b); EXPECT_NE(a, c); EXPECT_NE(a, d); EXPECT_NE(b, d); EXPECT_NE(b, c); EXPECT_NE(c, d); }
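Editor's note: because each progress struct defines operator<<, any ostream-based formatter works on it; tensorstore::StrCat in the tests is one such formatter. A minimal sketch using std::ostringstream directly (field order taken from the operator<< definition above):

#include <iostream>
#include <sstream>
#include "tensorstore/progress.h"

int main() {
  std::ostringstream os;
  os << tensorstore::WriteProgress{3, 2, 1};
  // "{ total_elements=3, copied_elements=2, committed_elements=1 }"
  std::cout << os.str() << '\n';
}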
#ifndef TENSORSTORE_UTIL_RATIONAL_H_ #define TENSORSTORE_UTIL_RATIONAL_H_ #include <climits> #include <cmath> #include <limits> #include <ostream> #include <type_traits> #include "tensorstore/internal/integer_overflow.h" #include "tensorstore/serialization/fwd.h" #include "tensorstore/util/division.h" namespace tensorstore { template <typename I> class Rational { static_assert(std::numeric_limits<I>::is_specialized && std::numeric_limits<I>::is_integer && std::numeric_limits<I>::is_signed); public: using int_type = I; constexpr Rational(I value = 0) : n_(value), d_(1) {} constexpr Rational(I n, I d) { if (d != 0) { I gcd = tensorstore::GreatestCommonDivisor(n, d); n /= gcd; d /= gcd; if (n == 0) { d = 1; } else if (d < 0) { if (n == std::numeric_limits<I>::min() || d == std::numeric_limits<I>::min()) { d = 0; } else { d = -d; n = -n; } } } n_ = n; d_ = d; } constexpr Rational& operator=(I value) { *this = Rational<I>(value); return *this; } constexpr I numerator() const { return n_; } constexpr I denominator() const { return d_; } constexpr bool is_nan() const { return d_ == 0; } static constexpr Rational nan() { Rational r; r.d_ = 0; return r; } constexpr explicit operator bool() const { return d_ == 0 || n_ != 0; } friend constexpr bool operator==(Rational t, Rational r) { return t.d_ != 0 && t.n_ == r.n_ && t.d_ == r.d_; } friend constexpr bool operator==(Rational t, I i) { return t.d_ == 1 && t.n_ == i; } friend constexpr bool operator==(I i, Rational t) { return (t == i); } friend constexpr bool operator!=(Rational t, Rational r) { return !(t == r); } friend constexpr bool operator!=(Rational t, I i) { return !(t == i); } friend constexpr bool operator!=(I i, Rational t) { return !(t == i); } friend std::ostream& operator<<(std::ostream& os, Rational x) { if (x.is_nan()) return os << "nan"; if (x.d_ == 1) return os << x.n_; return os << x.n_ << '/' << x.d_; } constexpr Rational operator+() const { return *this; } constexpr Rational operator-() const { Rational r; if (n_ == std::numeric_limits<I>::min()) return nan(); r.n_ = -n_; r.d_ = d_; return r; } friend constexpr Rational operator+(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I g = GreatestCommonDivisor(t.d_, r.d_); t.d_ /= g; if (I temp; internal::MulOverflow(t.n_, r.d_ / g, &t.n_) || internal::MulOverflow(r.n_, t.d_, &temp) || internal::AddOverflow(t.n_, temp, &t.n_)) { return nan(); } g = GreatestCommonDivisor(t.n_, g); t.n_ /= g; if (internal::MulOverflow(t.d_, r.d_ / g, &t.d_)) return nan(); return t; } friend constexpr Rational operator+(Rational t, I i) { if (internal::MulOverflow(i, t.d_, &i) || internal::AddOverflow(t.n_, i, &t.n_)) { return nan(); } return t; } friend constexpr Rational operator+(I i, Rational t) { return t + i; } constexpr Rational& operator+=(Rational r) { return *this = *this + r; } constexpr Rational& operator+=(I i) { return *this = *this + i; } constexpr Rational& operator++() { return *this += 1; } constexpr Rational operator++(int) { Rational r = *this; *this += 1; return r; } friend constexpr Rational operator-(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I g = GreatestCommonDivisor(t.d_, r.d_); t.d_ /= g; if (I temp; internal::MulOverflow(t.n_, r.d_ / g, &t.n_) || internal::MulOverflow(r.n_, t.d_, &temp) || internal::SubOverflow(t.n_, temp, &t.n_)) { return nan(); } g = GreatestCommonDivisor(t.n_, g); t.n_ /= g; if (internal::MulOverflow(t.d_, r.d_ / g, &t.d_)) return nan(); return t; } friend constexpr Rational operator-(Rational t, I i) { if 
(internal::MulOverflow(i, t.d_, &i) || internal::SubOverflow(t.n_, i, &t.n_)) { return nan(); } return t; } friend constexpr Rational operator-(I i, Rational r) { if (internal::MulOverflow(i, r.d_, &i) || internal::SubOverflow(i, r.n_, &r.n_)) { return nan(); } return r; } constexpr Rational& operator-=(Rational r) { return *this = *this - r; } constexpr Rational& operator-=(I i) { return *this = *this - i; } constexpr Rational& operator--() { return *this -= 1; } constexpr Rational operator--(int) { Rational r = *this; *this -= 1; return r; } friend constexpr Rational operator*(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return nan(); I gcd1 = GreatestCommonDivisor(t.n_, r.d_); I gcd2 = GreatestCommonDivisor(r.n_, t.d_); if (internal::MulOverflow(t.n_ / gcd1, r.n_ / gcd2, &t.n_) || internal::MulOverflow(t.d_ / gcd2, r.d_ / gcd1, &t.d_)) { return nan(); } return t; } friend constexpr Rational operator*(Rational t, I i) { if (t.is_nan()) return nan(); I gcd = GreatestCommonDivisor(i, t.d_); if (internal::MulOverflow(t.n_, i / gcd, &t.n_)) return nan(); t.d_ /= gcd; return t; } friend constexpr Rational operator*(I i, Rational t) { return t * i; } constexpr Rational& operator*=(Rational r) { return *this = *this * r; } constexpr Rational& operator*=(I i) { return *this = *this * i; } friend constexpr Rational operator/(Rational t, Rational r) { if (t.is_nan() || r.is_nan() || r.n_ == 0) return nan(); I gcd1 = GreatestCommonDivisor(t.n_, r.n_); I gcd2 = GreatestCommonDivisor(r.d_, t.d_); if (internal::MulOverflow(t.n_ / gcd1, r.d_ / gcd2, &t.n_) || internal::MulOverflow(t.d_ / gcd2, r.n_ / gcd1, &t.d_)) { return nan(); } if (t.d_ < 0) { if (t.d_ == std::numeric_limits<I>::min() || t.n_ == std::numeric_limits<I>::min()) { return nan(); } t.n_ = -t.n_; t.d_ = -t.d_; } return t; } friend constexpr Rational operator/(Rational t, I i) { if (t.is_nan() || i == 0) return nan(); I gcd = GreatestCommonDivisor(i, t.n_); t.n_ /= gcd; if (internal::MulOverflow(t.d_, i / gcd, &t.d_)) return nan(); return t; } friend constexpr Rational operator/(I i, Rational r) { if (r.is_nan() || r.n_ == 0) return nan(); I gcd1 = GreatestCommonDivisor(i, r.n_); Rational t; if (internal::MulOverflow(i / gcd1, r.d_, &t.n_)) return nan(); t.d_ = r.n_ / gcd1; if (t.d_ < 0) { if (t.d_ == std::numeric_limits<I>::min() || t.n_ == std::numeric_limits<I>::min()) { return nan(); } t.n_ = -t.n_; t.d_ = -t.d_; } return t; } constexpr Rational& operator/=(Rational r) { return *this = *this / r; } constexpr Rational& operator/=(I i) { return *this = *this / i; } friend constexpr bool operator<(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; if (t.d_ == r.d_) return t.n_ < r.n_; if (t.d_ == 1) return t.n_ < r; if (r.d_ == 1) return t < r.n_; ContinuedFraction ts{t}, rs{r}; bool reverse = false; while (true) { if (ts.q != rs.q) { return reverse ? 
ts.q > rs.q : ts.q < rs.q; } reverse = !reverse; if (ts.r == 0 || rs.r == 0) { break; } ts.next(); rs.next(); } if (ts.r == rs.r) { return false; } else { return (ts.r != 0) != reverse; } } friend constexpr bool operator<(Rational t, I i) { if (t.is_nan()) return false; I q = t.n_ / t.d_, r = t.n_ % t.d_; if (r < 0) { r += t.d_; --q; } return q < i; } friend constexpr bool operator<(I i, Rational t) { if (t.is_nan()) return false; I q = t.n_ / t.d_, r = t.n_ % t.d_; if (r > 0) { r -= t.d_; ++q; } return q > i; } friend constexpr bool operator>(Rational t, Rational r) { return r < t; } friend constexpr bool operator>(I i, Rational t) { return t < i; } friend constexpr bool operator>(Rational t, I i) { return i < t; } friend constexpr bool operator<=(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; return !(r < t); } friend constexpr bool operator<=(Rational t, I r) { if (t.is_nan()) return false; return !(r < t); } friend constexpr bool operator<=(I r, Rational t) { if (t.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(Rational t, Rational r) { if (t.is_nan() || r.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(I t, Rational r) { if (r.is_nan()) return false; return !(t < r); } friend constexpr bool operator>=(Rational t, I r) { if (t.is_nan()) return false; return !(t < r); } [[nodiscard]] static constexpr bool UnifyDenominators(Rational a, Rational b, I& numerator_a, I& numerator_b, I& denominator) { if (a.is_nan() || b.is_nan()) return false; I g = GreatestCommonDivisor(a.denominator(), b.denominator()); I a_factor = b.denominator() / g; I b_factor = a.denominator() / g; return !(internal::MulOverflow(b_factor, b.denominator(), &denominator) || internal::MulOverflow(a.numerator(), a_factor, &numerator_a) || internal::MulOverflow(b.numerator(), b_factor, &numerator_b)); } static Rational FromDouble(double value) { if (!std::isfinite(value)) return nan(); constexpr int max_exponent = sizeof(I) * CHAR_BIT - 2; int exponent; double mantissa = std::frexp(value, &exponent); if (exponent > max_exponent + 1) return nan(); if (exponent <= -max_exponent) return I(0); int n = std::min(max_exponent + 1, max_exponent + exponent); I numerator = static_cast<I>(std::round(std::ldexp(mantissa, n))); I denominator = I(1) << (n - exponent); return {numerator, denominator}; } constexpr Rational Approximate(I max_denominator) const { assert(max_denominator >= I(1)); if (d_ <= max_denominator) return *this; using U = std::make_unsigned_t<I>; U p0 = 0, q0 = 1, p1 = 1, q1 = 0; bool negative = false; U n = 0, d = d_; if (n_ < I(0)) { negative = true; n = ~static_cast<U>(n_) + U(1); } else { n = static_cast<U>(n_); } while (true) { U a = n / d; U r = n % d; U q2 = q0 + a * q1; if (q2 >= max_denominator) { U x = (max_denominator - q0) / q1; auto result = (x * 2 >= a) ? 
FromReduced(static_cast<I>(p0 + x * p1), static_cast<I>(q0 + x * q1)) : FromReduced(static_cast<I>(p1), static_cast<I>(q1)); if (negative) { result.n_ *= -1; } return result; } U p2 = p0 + a * p1; p0 = p1; q0 = q1; p1 = p2; q1 = q2; n = d; d = r; } } constexpr static auto ApplyMembers = [](auto&& x, auto f) { return f(x.n_, x.d_); }; private: static constexpr Rational FromReduced(I n, I d) { Rational r; r.n_ = n; r.d_ = d; return r; } struct ContinuedFraction { constexpr explicit ContinuedFraction(Rational x) : n(x.n_), d(x.d_), q(x.n_ / x.d_), r(x.n_ % x.d_) { if (r < 0) { r += d; --q; } } constexpr void next() { n = d; d = r; q = n / d; r = n % d; } I n, d, q, r; }; I n_ = 0; I d_ = 0; }; } #endif
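Editor's note: Approximate walks the continued-fraction expansion of the value and returns the best fraction whose denominator fits the bound (a convergent, or a semiconvergent via its final bounded step). The 22/7, 355/113, ... values in the tests below are exactly the convergents of pi. A standalone sketch of the underlying convergent recurrence, not the library's exact bounded-denominator step (plain C++, no tensorstore dependency):

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  double x = 3.14159265358979323846;
  // p/q convergents via h_n = a_n*h_{n-1} + h_{n-2} (same for q),
  // seeded with 1/0 and 0/1; stop after a few terms to avoid FP drift.
  int64_t p0 = 1, q0 = 0, p1 = 0, q1 = 1;
  for (int i = 0; i < 6; ++i) {
    int64_t a = static_cast<int64_t>(std::floor(x));
    int64_t p2 = a * p0 + p1, q2 = a * q0 + q1;
    std::cout << p2 << '/' << q2 << '\n';  // 3/1 22/7 333/106 355/113 ...
    p1 = p0; q1 = q0; p0 = p2; q0 = q2;
    x = 1.0 / (x - static_cast<double>(a));
  }
}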
#include "tensorstore/util/rational.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/util/str_cat.h" namespace { constexpr double pi = 3.14159265358979323846264338327950288; using ::tensorstore::Rational; TEST(RationalTest, Initialization) { static constexpr Rational<int> r1, r2(0), r3(1), r4(-3), r5(7, 2), r6(5, 15), r7(14, -21), r8(-4, 6), r9(-14, -70); static_assert(r1.numerator() == 0); static_assert(r2.numerator() == 0); static_assert(r3.numerator() == 1); static_assert(r4.numerator() == -3); static_assert(r5.numerator() == 7); static_assert(r6.numerator() == 1); static_assert(r7.numerator() == -2); static_assert(r8.numerator() == -2); static_assert(r9.numerator() == 1); static_assert(r1.denominator() == 1); static_assert(r2.denominator() == 1); static_assert(r3.denominator() == 1); static_assert(r4.denominator() == 1); static_assert(r5.denominator() == 2); static_assert(r6.denominator() == 3); static_assert(r7.denominator() == 3); static_assert(r8.denominator() == 3); static_assert(r9.denominator() == 5); static_assert(Rational<int>(0, 0).is_nan()); static_assert(Rational<int>(1, std::numeric_limits<int>::min()).is_nan()); static_assert(!Rational<int>(1, -std::numeric_limits<int>::max()).is_nan()); } TEST(RationalTest, Compare) { static constexpr Rational<int> r1, r2(0), r3(1), r4(-3), r5(7, 2), r6(5, 15), r7(14, -21), r8(-4, 6), r9(-14, -70), nan = Rational<int>::nan(); static_assert(r1 == r2); static_assert(r2 != r3); static_assert(r4 < r3); static_assert(r4 <= r5); static_assert(r1 <= r2); static_assert(r5 > r6); static_assert(r5 >= r6); static_assert(r7 >= r8); static_assert(!(r3 == r2)); static_assert(!(r1 != r2)); static_assert(!(r1 < r2)); static_assert(!(r5 < r6)); static_assert(!(r9 <= r2)); static_assert(!(r8 > r7)); static_assert(!(r8 > r2)); static_assert(!(r4 >= r6)); static_assert(r1 == 0); static_assert(r2 != -1); static_assert(r3 < 2); static_assert(r4 <= -3); static_assert(r5 > 3); static_assert(r6 >= 0); static_assert(0 == r2); static_assert(0 != r7); static_assert(-1 < r8); static_assert(-2 <= r9); static_assert(1 > r1); static_assert(1 >= r3); static constexpr Rational<int> x1(9, 4); static constexpr Rational<int> x2(61, 27); static constexpr Rational<int> x3(52, 23); static constexpr Rational<int> x4(70, 31); static_assert(x1 < x2); static_assert(!(x1 < x1)); static_assert(!(x2 < x2)); static_assert(!(x2 < x1)); static_assert(x2 < x3); static_assert(x4 < x2); static_assert(!(x3 < x4)); static_assert(r7 < x1); static_assert(!(x2 < r7)); static_assert(!(nan < nan)); static_assert(!(nan <= nan)); static_assert(!(nan == nan)); static_assert(nan != nan); static_assert(!(nan > nan)); static_assert(!(nan >= nan)); static_assert(!(nan < r1)); static_assert(!(nan == r1)); static_assert(nan != r1); static_assert(!(nan <= r1)); static_assert(!(nan > r1)); static_assert(!(nan >= r1)); static_assert(!(r1 < nan)); static_assert(!(r1 <= nan)); static_assert(!(r1 == nan)); static_assert(r1 != nan); static_assert(!(r1 > nan)); static_assert(!(r1 >= nan)); static_assert(!(nan < 0)); static_assert(!(nan == 0)); static_assert(nan != 0); static_assert(!(nan <= 0)); static_assert(!(nan > 0)); static_assert(!(nan >= 0)); static_assert(!(0 < nan)); static_assert(!(0 <= nan)); static_assert(!(0 == nan)); static_assert(0 != nan); static_assert(!(0 > nan)); static_assert(!(0 >= nan)); } TEST(RationalTest, Increment) { Rational<int> r1, r2(0), r3(1), r7(14, -21), r8(-4, 6); EXPECT_EQ(r1++, r2); EXPECT_NE(r1, r2); EXPECT_EQ(r1, r3); EXPECT_EQ(--r1, r2); 
EXPECT_EQ(r8--, r7); EXPECT_NE(r8, r7); EXPECT_EQ(++r8, r7); Rational<int> x1 = std::numeric_limits<int>::max(); EXPECT_FALSE(x1.is_nan()); ++x1; EXPECT_TRUE(x1.is_nan()); Rational<int> x2 = std::numeric_limits<int>::min(); EXPECT_FALSE(x2.is_nan()); --x2; EXPECT_TRUE(x2.is_nan()); } TEST(RationalTest, UnaryOperators) { static constexpr Rational<int> r2(0), r3(1), r4(-3), r5(7, 2); static_assert(+r5 == r5); static_assert(-r3 != r3); static_assert(-(-r3) == r3); static_assert(-r4 == 3); static_assert(!r2); static_assert(!!r3); static_assert(r3); } TEST(RationalTest, Addition) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(1, 2) + rational_type(1, 2), static_cast<T>(1)); EXPECT_EQ(rational_type(11, 3) + rational_type(1, 2), rational_type(25, 6)); EXPECT_EQ(rational_type(-8, 3) + rational_type(1, 5), rational_type(-37, 15)); EXPECT_EQ(rational_type(-7, 6) + rational_type(1, 7), rational_type(1, 7) - rational_type(7, 6)); EXPECT_EQ(rational_type(13, 5) - rational_type(1, 2), rational_type(21, 10)); EXPECT_EQ(rational_type(22, 3) + static_cast<T>(1), rational_type(25, 3)); EXPECT_EQ(rational_type(12, 7) - static_cast<T>(2), rational_type(-2, 7)); EXPECT_EQ(static_cast<T>(3) + rational_type(4, 5), rational_type(19, 5)); EXPECT_EQ(static_cast<T>(4) - rational_type(9, 2), rational_type(-1, 2)); rational_type r(11); r -= rational_type(20, 3); EXPECT_EQ(r, rational_type(13, 3)); r += rational_type(1, 2); EXPECT_EQ(r, rational_type(29, 6)); r -= static_cast<T>(5); EXPECT_EQ(r, rational_type(1, -6)); r += rational_type(1, 5); EXPECT_EQ(r, rational_type(1, 30)); r += static_cast<T>(2); EXPECT_EQ(r, rational_type(61, 30)); } TEST(RationalTest, Multiplication) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(1, 3) * rational_type(-3, 4), rational_type(-1, 4)); EXPECT_EQ(rational_type(2, 5) * static_cast<T>(7), rational_type(14, 5)); EXPECT_EQ(static_cast<T>(-2) * rational_type(1, 6), rational_type(-1, 3)); rational_type r = rational_type(3, 7); r *= static_cast<T>(14); EXPECT_EQ(r, static_cast<T>(6)); r *= rational_type(3, 8); EXPECT_EQ(r, rational_type(9, 4)); } TEST(RationalTest, Division) { using T = int; using rational_type = Rational<T>; EXPECT_EQ(rational_type(-1, 20) / rational_type(4, 5), rational_type(-1, 16)); EXPECT_EQ(rational_type(5, 6) / static_cast<T>(7), rational_type(5, 42)); EXPECT_EQ(static_cast<T>(8) / rational_type(2, 7), static_cast<T>(28)); EXPECT_TRUE((rational_type(23, 17) / rational_type()).is_nan()); EXPECT_TRUE((rational_type(4, 15) / static_cast<T>(0)).is_nan()); rational_type r = rational_type(4, 3); r /= rational_type(5, 4); EXPECT_EQ(r, rational_type(16, 15)); r /= static_cast<T>(4); EXPECT_EQ(r, rational_type(4, 15)); EXPECT_TRUE((r /= rational_type()).is_nan()); EXPECT_TRUE((r /= static_cast<T>(0)).is_nan()); EXPECT_EQ(rational_type(-1) / rational_type(-3), rational_type(1, 3)); } TEST(RationalTest, AssignArithmetic) { using T = int; using rational_type = Rational<T>; rational_type r = rational_type(4, 3); r += r; EXPECT_EQ(r, rational_type(8, 3)); r *= r; EXPECT_EQ(r, rational_type(64, 9)); rational_type s = r; r /= s; EXPECT_EQ(r, rational_type(1, 1)); s = r; r -= s; EXPECT_EQ(r, rational_type(0, 1)); s = r; EXPECT_TRUE((r /= s).is_nan()); } TEST(RationalTest, Ostream) { EXPECT_EQ("nan", tensorstore::StrCat(Rational<int>::nan())); EXPECT_EQ("5", tensorstore::StrCat(Rational<int>(5))); EXPECT_EQ("22/7", tensorstore::StrCat(Rational<int>(44, 14))); } TEST(RationalTest, Overflow) { using R = Rational<int32_t>; { R r 
= R(2147483647) + R(1); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647) - R(-1); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647) * R(2); EXPECT_TRUE(r.is_nan()); } EXPECT_EQ(R(2147483647, 2), R(2147483647) / R(2)); { R r = R(2147483647, 2) * R(3); EXPECT_TRUE(r.is_nan()); } { R r = R(2147483647, 2) / R(1, 3); EXPECT_TRUE(r.is_nan()); } } TEST(UnifyDenominatorsTest, Overflow) { using R = Rational<int32_t>; int32_t num0, num1, den; EXPECT_FALSE( R::UnifyDenominators({1, 2147483647}, {1, 2147483646}, num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(R::nan(), 1, num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(1, R::nan(), num0, num1, den)); EXPECT_FALSE(R::UnifyDenominators(R::nan(), R::nan(), num0, num1, den)); } TEST(UnifyDenominatorsTest, NoOverflow) { using R = Rational<int32_t>; R r0(1, 3); R r1(1, 2); int32_t num0, num1, den; EXPECT_TRUE(R::UnifyDenominators(r0, r1, num0, num1, den)); EXPECT_EQ(2, num0); EXPECT_EQ(3, num1); EXPECT_EQ(6, den); } TEST(FromDoubleTest, Simple) { using R = Rational<int64_t>; EXPECT_EQ(R(0), R::FromDouble(0)); EXPECT_EQ(R(1, 2), R::FromDouble(0.5)); EXPECT_EQ(R(1, 4), R::FromDouble(0.25)); EXPECT_EQ(R(1, 8), R::FromDouble(0.125)); EXPECT_EQ(R(-1), R::FromDouble(-1)); EXPECT_EQ(R(1), R::FromDouble(1)); EXPECT_EQ(R(5404319552844595, 18014398509481984), R::FromDouble(0.3)); EXPECT_EQ(R(-5404319552844595, 18014398509481984), R::FromDouble(-0.3)); for (int i = 1; i <= 62; ++i) { SCOPED_TRACE(tensorstore::StrCat("i=", i)); EXPECT_EQ(R(1, static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(1.0, -i))); EXPECT_EQ(R(-1, static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(-1.0, -i))); EXPECT_EQ(R(static_cast<int64_t>(1) << i), R::FromDouble(std::ldexp(1.0, i))); EXPECT_EQ(R(static_cast<int64_t>(-1) << i), R::FromDouble(std::ldexp(-1.0, i))); } EXPECT_EQ(R(1, static_cast<int64_t>(1) << 53), R::FromDouble(0x1.0000000000000p-53)); EXPECT_EQ(R(0), R::FromDouble(0x1.0000000000000p-63)); EXPECT_EQ(R(884279719003555, 281474976710656), R::FromDouble(pi)); } TEST(ApproximateTest, Simple) { using R = Rational<int64_t>; EXPECT_EQ(R(1), R(1).Approximate(100)); EXPECT_EQ(R(-1), R(-1).Approximate(100)); EXPECT_EQ(R(-100), R(-100).Approximate(100)); EXPECT_EQ(R(1, 3), R::FromDouble(0.33333333333333333333333).Approximate(1000000)); EXPECT_EQ(R(-1, 3), R::FromDouble(-0.33333333333333333333333).Approximate(1000000)); EXPECT_EQ(R(3, 10), R::FromDouble(0.3).Approximate(1000000)); EXPECT_EQ(R(1, 5), R::FromDouble(1.0 / 5.0).Approximate(1000000)); EXPECT_EQ(R(22, 7), R::FromDouble(pi).Approximate(10)); EXPECT_EQ(R(311, 99), R::FromDouble(pi).Approximate(100)); EXPECT_EQ(R(355, 113), R::FromDouble(pi).Approximate(1000)); EXPECT_EQ(R(312689, 99532), R::FromDouble(pi).Approximate(100000)); } }
static Rational FromDouble(double value) { if (!std::isfinite(value)) return nan(); constexpr int max_exponent = sizeof(I) * CHAR_BIT - 2; int exponent; double mantissa = std::frexp(value, &exponent); if (exponent > max_exponent + 1) return nan(); if (exponent <= -max_exponent) return I(0); int n = std::min(max_exponent + 1, max_exponent + exponent); I numerator = static_cast<I>(std::round(std::ldexp(mantissa, n))); I denominator = I(1) << (n - exponent); return {numerator, denominator}; }
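Editor's note: tracing FromDouble on 0.3 with I = int64_t: std::frexp yields mantissa 0.6 and exponent -1; max_exponent is 62, so n = min(63, 61) = 61 and the intermediate fraction is round(ldexp(0.6, 61)) / 2^62. The Rational constructor then divides out the common power of two, leaving 5404319552844595 / 2^54, exactly the value asserted in the FromDouble test above. A sketch reproducing that arithmetic directly:

#include <cmath>
#include <cstdint>
#include <iostream>
#include <numeric>

int main() {
  double value = 0.3;
  int exponent;
  double mantissa = std::frexp(value, &exponent);  // 0.6, -1
  int max_exponent = 62;                           // sizeof(int64_t)*CHAR_BIT - 2
  int n = std::min(max_exponent + 1, max_exponent + exponent);
  int64_t num = static_cast<int64_t>(std::round(std::ldexp(mantissa, n)));
  int64_t den = int64_t{1} << (n - exponent);
  int64_t g = std::gcd(num, den);  // the Rational constructor reduces like this
  std::cout << num / g << " / " << den / g << '\n';
  // 5404319552844595 / 18014398509481984
}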
#include "arolla/memory/optional_value.h" #include <cstdint> #include "absl/strings/str_cat.h" #include "arolla/util/bytes.h" #include "arolla/util/repr.h" #include "arolla/util/text.h" namespace arolla { ReprToken ReprTraits<OptionalValue<bool>>::operator()( const OptionalValue<bool>& value) const { return ReprToken{ value.present ? absl::StrCat("optional_boolean{", Repr(value.value), "}") : "optional_boolean{NA}"}; } ReprToken ReprTraits<OptionalValue<int32_t>>::operator()( const OptionalValue<int32_t>& value) const { return ReprToken{value.present ? absl::StrCat("optional_int32{", Repr(value.value), "}") : "optional_int32{NA}"}; } ReprToken ReprTraits<OptionalValue<int64_t>>::operator()( const OptionalValue<int64_t>& value) const { return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value)) : "optional_int64{NA}"}; } ReprToken ReprTraits<OptionalValue<uint64_t>>::operator()( const OptionalValue<uint64_t>& value) const { return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value)) : "optional_uint64{NA}"}; } ReprToken ReprTraits<OptionalValue<float>>::operator()( const OptionalValue<float>& value) const { return ReprToken{ value.present ? absl::StrCat("optional_float32{", Repr(value.value), "}") : "optional_float32{NA}"}; } ReprToken ReprTraits<OptionalValue<double>>::operator()( const OptionalValue<double>& value) const { return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value)) : "optional_float64{NA}"}; } ReprToken ReprTraits<OptionalValue<Bytes>>::operator()( const OptionalValue<Bytes>& value) const { return ReprToken{value.present ? absl::StrCat("optional_bytes{", Repr(value.value), "}") : "optional_bytes{NA}"}; } ReprToken ReprTraits<OptionalValue<Text>>::operator()( const OptionalValue<Text>& value) const { return ReprToken{value.present ? absl::StrCat("optional_text{", Repr(value.value), "}") : "optional_text{NA}"}; } ReprToken ReprTraits<OptionalUnit>::operator()( const OptionalUnit& value) const { return ReprToken{value.present ? "present" : "missing"}; } }
#include "arolla/memory/optional_value.h" #include <cstdint> #include <cstring> #include <memory> #include <new> #include <optional> #include <sstream> #include <string> #include <type_traits> #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "arolla/memory/frame.h" #include "arolla/memory/memory_allocation.h" #include "arolla/util/bytes.h" #include "arolla/util/repr.h" #include "arolla/util/testing/status_matchers_backport.h" #include "arolla/util/text.h" #include "arolla/util/view_types.h" namespace arolla { namespace testing { namespace { using ::testing::HasSubstr; using ::testing::Test; TEST(OptionalValueTest, TestEmptyValues) { OptionalValue<float> v1; EXPECT_FALSE(v1.present); OptionalValue<float> v2(std::optional<float>{}); EXPECT_FALSE(v2.present); OptionalValue<float> v3(std::nullopt); EXPECT_FALSE(v3.present); EXPECT_EQ(v1, v2); EXPECT_EQ(v1, v3); v1.value = 1.0f; v2.value = 2.0f; EXPECT_EQ(v1, v2); auto absl_v = v2.AsOptional(); EXPECT_FALSE(absl_v.has_value()); } TEST(OptionalValueTest, TestConstExpr) { static_assert(!OptionalValue<int>().present); static_assert(OptionalValue<int>(5).present); static_assert(OptionalValue<int>(5).value == 5); static_assert(MakeOptionalValue(5).present); static_assert(MakeOptionalValue(5).value == 5); } TEST(OptionalValueTest, TestPresentValues) { OptionalValue<float> v1(1.0f); EXPECT_TRUE(v1.present); EXPECT_EQ(1.0f, v1.value); EXPECT_EQ(Repr(v1), "optional_float32{1.}"); auto v_auto = MakeOptionalValue(1.0f); EXPECT_TRUE(v_auto.present); EXPECT_EQ(1.0f, v_auto.value); EXPECT_EQ(Repr(v_auto), "optional_float32{1.}"); OptionalValue<float> v2(std::optional<float>{2.0f}); EXPECT_TRUE(v2.present); EXPECT_EQ(2.0f, v2.value); EXPECT_EQ(Repr(v2), "optional_float32{2.}"); EXPECT_NE(v1, v2); v1.value = 2.0f; EXPECT_EQ(v1, v2); } TEST(OptionalValueTest, TestAssignment) { OptionalValue<float> v1; v1 = 1.0f; EXPECT_TRUE(v1.present); EXPECT_EQ(v1.value, 1.0f); v1 = std::nullopt; EXPECT_FALSE(v1.present); } TEST(OptionalValueTest, MakeStatusOrOptionalValue) { absl::StatusOr<OptionalValue<float>> v = MakeStatusOrOptionalValue(absl::StatusOr<float>(1.0f)); ASSERT_OK(v.status()); EXPECT_TRUE(v.value().present); EXPECT_EQ(v.value().value, 1.0f); absl::StatusOr<OptionalValue<float>> v_error = MakeStatusOrOptionalValue( absl::StatusOr<float>(absl::InternalError("fake"))); EXPECT_THAT(v_error.status(), StatusIs(absl::StatusCode::kInternal, HasSubstr("fake"))); } TEST(OptionalValueTest, OptionalUnit) { EXPECT_EQ(OptionalUnit(), kMissing); EXPECT_EQ(OptionalUnit(false), kMissing); EXPECT_FALSE(kMissing); EXPECT_FALSE(kMissing.present); EXPECT_EQ(Repr(kMissing), "missing"); EXPECT_EQ(OptionalUnit(true), kPresent); EXPECT_TRUE(kPresent); EXPECT_TRUE(kPresent.present); EXPECT_EQ(Repr(kPresent), "present"); } TEST(OptionalValueTest, Comparison) { OptionalValue<float> v0; v0.value = 1.0f; OptionalValue<float> v1(1.0f); OptionalValue<float> v2(2.0f); { EXPECT_TRUE(v1 == v1); EXPECT_TRUE(v0 == v0); EXPECT_FALSE(v1 == v2); EXPECT_FALSE(v1 == v0); EXPECT_FALSE(v1 != v1); EXPECT_FALSE(v0 != v0); EXPECT_TRUE(v1 != v2); EXPECT_TRUE(v1 != v0); OptionalValue<float> v0_2; v0_2.value = 2.0f; EXPECT_TRUE(v0 == v0_2); EXPECT_FALSE(v0 != v0_2); } { EXPECT_TRUE(v1 == 1.0f); EXPECT_TRUE(1.0f == v1); EXPECT_FALSE(v1 != 1.0f); EXPECT_FALSE(1.0f != v1); EXPECT_FALSE(v1 == 2.0f); EXPECT_FALSE(2.0f == v1); EXPECT_TRUE(v1 != 2.0f); EXPECT_TRUE(2.0f != v1); } { 
EXPECT_FALSE(v1 == std::nullopt); EXPECT_FALSE(std::nullopt == v1); EXPECT_TRUE(v0 == std::nullopt); EXPECT_TRUE(std::nullopt == v0); EXPECT_TRUE(v1 != std::nullopt); EXPECT_TRUE(std::nullopt != v1); EXPECT_FALSE(v0 != std::nullopt); EXPECT_FALSE(std::nullopt != v0); } } TEST(OptionalValueTest, TestImplicitConstructors) { OptionalValue<float> v = {}; EXPECT_EQ(v, OptionalValue<float>()); v = 3.5; EXPECT_EQ(v, OptionalValue<float>(3.5)); v = std::optional<float>(2.5); EXPECT_EQ(v, OptionalValue<float>(2.5)); } TEST(OptionalValueTest, TestMoves) { auto ptr = std::make_unique<std::string>("Hello!"); OptionalValue<std::unique_ptr<std::string>> v1(std::move(ptr)); EXPECT_TRUE(v1.present); EXPECT_EQ("Hello!", *(v1.value)); std::optional<std::unique_ptr<std::string>> v2(std::move(v1).AsOptional()); EXPECT_TRUE(v2.has_value()); EXPECT_EQ("Hello!", **v2); } template <typename T> using Slot = FrameLayout::Slot<T>; TEST(OptionalValueTest, TestFrameLayout) { FrameLayout::Builder builder; builder.AddSlot<double>(); builder.AddSlot<int32_t>(); auto optional_slot = builder.AddSlot<OptionalValue<float>>(); Slot<bool> presence_slot = optional_slot.GetSubslot<0>(); Slot<float> value_slot = optional_slot.GetSubslot<1>(); FrameLayout layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); frame.Set(optional_slot, OptionalValue<float>{1.0f}); EXPECT_EQ(true, frame.Get(presence_slot)); EXPECT_EQ(1.0f, frame.Get(value_slot)); frame.Set(value_slot, 2.0f); EXPECT_EQ(2.0, frame.Get(optional_slot).value); } TEST(OptionalValue, IsBZeroConstructible) { EXPECT_TRUE(is_bzero_constructible<OptionalValue<float>>()); EXPECT_TRUE(is_bzero_constructible<OptionalValue<int>>()); EXPECT_FALSE(is_bzero_constructible<OptionalValue<std::string>>()); } TEST(OptionalValue, BZeroStateIsEmptyValue) { using T = OptionalValue<float>; std::aligned_storage_t<sizeof(T), alignof(T)> storage; memset(&storage, 0, sizeof(storage)); EXPECT_FALSE(std::launder(reinterpret_cast<const T*>(&storage))->present); } TEST(OptionalValue, StructuredBindings) { { OptionalValue<float> f; auto [present, value] = f; EXPECT_FALSE(present); } { OptionalValue<float> f = 17.0; auto [present, value] = f; EXPECT_TRUE(present); EXPECT_EQ(value, 17.0); } } TEST(OptionalValue, ViewType) { static_assert(std::is_same_v<view_type_t<OptionalValue<int64_t>>, OptionalValue<int64_t>>); static_assert(std::is_same_v<view_type_t<OptionalValue<Bytes>>, OptionalValue<absl::string_view>>); auto fn = [](OptionalValue<absl::string_view> v) -> char { return (v.present && !v.value.empty()) ? v.value[0] : 'X'; }; EXPECT_EQ(fn(OptionalValue<Text>(Text("Hello"))), 'H'); EXPECT_EQ(fn(std::nullopt), 'X'); } TEST(OptionalValue, WrapFnToAcceptOptionalArgs) { { auto fn = [](int a, OptionalValue<int64_t> b, int64_t c) -> int { return a + c + (b.present ? 
b.value : 10); }; auto opt_fn = WrapFnToAcceptOptionalArgs(fn); EXPECT_EQ(opt_fn(1, 2, 3), OptionalValue<int>(6)); EXPECT_EQ(opt_fn(std::nullopt, 2, 3), OptionalValue<int>()); EXPECT_EQ(opt_fn(1, std::nullopt, 3), OptionalValue<int>(14)); EXPECT_EQ(opt_fn(1, 2, std::nullopt), OptionalValue<int>()); } { auto fn = [](const Bytes& v) -> const Bytes& { return v; }; auto opt_fn = WrapFnToAcceptOptionalArgs(fn); EXPECT_EQ(opt_fn(Bytes("123")), OptionalValue<Bytes>("123")); } { auto fn = [](absl::string_view v) { return v; }; auto opt_fn = WrapFnToAcceptOptionalArgs(fn); EXPECT_EQ(opt_fn(MakeOptionalValue(Bytes("123"))), MakeOptionalValue(absl::string_view("123"))); } { auto fn = [](int a, OptionalValue<int64_t> b, int64_t c) -> absl::StatusOr<int> { if (c < 0) { return absl::InvalidArgumentError("c < 0"); } else { return a + c + (b.present ? b.value : 10); } }; auto opt_fn = WrapFnToAcceptOptionalArgs(fn); EXPECT_THAT(opt_fn(1, 2, 3), IsOkAndHolds(OptionalValue<int>(6))); EXPECT_THAT(opt_fn(1, 2, -3), StatusIs(absl::StatusCode::kInvalidArgument, "c < 0")); EXPECT_THAT(opt_fn(std::nullopt, 2, -3), IsOkAndHolds(OptionalValue<int>())); } } TEST(OptionalValueReprTest, bool) { EXPECT_EQ(Repr(OptionalValue<bool>(true)), "optional_boolean{true}"); EXPECT_EQ(Repr(OptionalValue<bool>()), "optional_boolean{NA}"); } TEST(OptionalValueReprTest, int32_t) { EXPECT_EQ(Repr(OptionalValue<int32_t>(1)), "optional_int32{1}"); EXPECT_EQ(Repr(OptionalValue<int32_t>()), "optional_int32{NA}"); } TEST(OptionalValueReprTest, int64_t) { EXPECT_EQ(Repr(OptionalValue<int64_t>(1)), "optional_int64{1}"); EXPECT_EQ(Repr(OptionalValue<int64_t>()), "optional_int64{NA}"); } TEST(OptionalValueReprTest, uint64_t) { EXPECT_EQ(Repr(OptionalValue<uint64_t>(1)), "optional_uint64{1}"); EXPECT_EQ(Repr(OptionalValue<uint64_t>()), "optional_uint64{NA}"); } TEST(OptionalValueReprTest, float) { EXPECT_EQ(Repr(OptionalValue<float>(1.5)), "optional_float32{1.5}"); EXPECT_EQ(Repr(OptionalValue<float>()), "optional_float32{NA}"); } TEST(OptionalValueReprTest, double) { EXPECT_EQ(Repr(OptionalValue<double>(1.5)), "optional_float64{1.5}"); EXPECT_EQ(Repr(OptionalValue<double>()), "optional_float64{NA}"); } TEST(OptionalValueReprTest, Bytes) { EXPECT_EQ(Repr(OptionalValue<Bytes>("abc")), "optional_bytes{b'abc'}"); EXPECT_EQ(Repr(OptionalValue<Bytes>()), "optional_bytes{NA}"); } TEST(OptionalValueReprTest, Text) { EXPECT_EQ(Repr(OptionalValue<Text>("abc")), "optional_text{'abc'}"); EXPECT_EQ(Repr(OptionalValue<Text>()), "optional_text{NA}"); } TEST(OptionalValueReprTest, StreamOp) { { std::ostringstream oss; oss << OptionalValue<float>(1.5); EXPECT_EQ(oss.str(), "optional_float32{1.5}"); } { std::ostringstream oss; oss << OptionalValue<float>(); EXPECT_EQ(oss.str(), "optional_float32{NA}"); } } } } }
ReprToken ReprTraits<OptionalValue<double>>::operator()( const OptionalValue<double>& value) const { return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value)) : "optional_float64{NA}"}; }
TEST(OptionalValueReprTest, double) { EXPECT_EQ(Repr(OptionalValue<double>(1.5)), "optional_float64{1.5}"); EXPECT_EQ(Repr(OptionalValue<double>()), "optional_float64{NA}"); }
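Editor's note: the ReprTraits definitions above use two composition styles. For int64, uint64, and float64 the scalar Repr already embeds the type name (the tests show Repr(OptionalValue<int64_t>(1)) == "optional_int64{1}"), so the wrapper only prepends "optional_"; for the remaining types the wrapper spells out the full "optional_<type>{...}" prefix itself. A minimal standalone illustration of the same pattern (ScalarRepr and OptionalRepr are hypothetical stand-ins, not the arolla API):

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in: the scalar repr already carries the type name,
// so the optional repr can just prepend a prefix, as the arolla code does.
std::string ScalarRepr(int64_t v) { return "int64{" + std::to_string(v) + "}"; }

std::string OptionalRepr(std::optional<int64_t> v) {
  return v ? "optional_" + ScalarRepr(*v) : "optional_int64{NA}";
}

int main() {
  std::cout << OptionalRepr(1) << '\n';             // optional_int64{1}
  std::cout << OptionalRepr(std::nullopt) << '\n';  // optional_int64{NA}
}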
#include "xla/service/reshape_decomposer.h" #include "absl/status/status.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/service/hlo_creation_utils.h" namespace xla { namespace { class ReshapeDecomposerVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleReshape(HloInstruction* reshape) override { HloInstruction* operand = reshape->mutable_operand(0); auto s = reshape->shape(); auto s0 = operand->shape(); if (ShapeUtil::ReshapeIsBitcast(s, s0)) { auto b = MakeBitcastHlo(operand, s, &operand->metadata()); return ReplaceInstruction(reshape, b); } else if (auto output_aligned_input_shape = ShapeUtil::AlignLayouts(s, s0)) { Shape new_input_shape = *output_aligned_input_shape; HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape); VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical " "transpose on the operand: " << copied_operand->ToString(); auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata()); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b)); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); } else if (auto input_aligned_output_shape = ShapeUtil::AlignLayouts(s0, s)) { Shape new_output_shape = *input_aligned_output_shape; auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata()); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); HloInstruction* copied_result = MakeCopyHlo(b, s); VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical " "transposition on the result: " << copied_result->ToString(); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result)); } else { VLOG(3) << "Both input and output of reshape are not alignable, create " "two physical transposes"; auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout( s0.element_type(), s0.dimensions()); auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized); auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout( s.element_type(), s.dimensions()); auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata()); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); auto c2 = MakeCopyHlo(b, s); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2)); } return absl::OkStatus(); } }; } absl::StatusOr<bool> ReshapeDecomposer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return ReshapeDecomposerVisitor{}.RunOnModule(module, execution_threads); } }
#include "xla/service/reshape_decomposer.h" #include <memory> #include <optional> #include "xla/service/hlo_parser.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { class ReshapeDecomposerTest : public HloTestBase { public: void CheckReshapeDecomposer(const char* hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite( hlo, ReshapeDecomposer{}, expected, [&](HloModule* module) { EXPECT_TRUE(absl::c_all_of( module->entry_computation()->instructions(), [&](const HloInstruction* instr) { return instr->opcode() != HloOpcode::kReshape || ShapeUtil::ReshapeIsBitcast(instr->operand(0)->shape(), instr->shape()); })); }); } }; TEST_F(ReshapeDecomposerTest, IsBitcast) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[8]{0} parameter(0) ROOT r = f32[4,2]{1,0} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, AlignableOutput) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[8,3]{1,0} parameter(0) ROOT r = f32[4,2,3]{0,1,2} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, AlignableInput) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3]{0,1,2} parameter(0) ROOT r = f32[8,3]{1,0} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, NotAlignable) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3,8]{0,2,1,3} parameter(0) ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } } }
#include "xla/service/reshape_decomposer.h" #include "absl/status/status.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/service/hlo_creation_utils.h" namespace xla { namespace { class ReshapeDecomposerVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleReshape(HloInstruction* reshape) override { HloInstruction* operand = reshape->mutable_operand(0); auto s = reshape->shape(); auto s0 = operand->shape(); if (ShapeUtil::ReshapeIsBitcast(s, s0)) { auto b = MakeBitcastHlo(operand, s, &operand->metadata()); return ReplaceInstruction(reshape, b); } else if (auto output_aligned_input_shape = ShapeUtil::AlignLayouts(s, s0)) { Shape new_input_shape = *output_aligned_input_shape; HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape); VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical " "transpose on the operand: " << copied_operand->ToString(); auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata()); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b)); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); } else if (auto input_aligned_output_shape = ShapeUtil::AlignLayouts(s0, s)) { Shape new_output_shape = *input_aligned_output_shape; auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata()); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); HloInstruction* copied_result = MakeCopyHlo(b, s); VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical " "transposition on the result: " << copied_result->ToString(); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result)); } else { VLOG(3) << "Both input and output of reshape are not alignable, create " "two physical transposes"; auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout( s0.element_type(), s0.dimensions()); auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized); auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout( s.element_type(), s.dimensions()); auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata()); DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape())); auto c2 = MakeCopyHlo(b, s); TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2)); } return absl::OkStatus(); }
TEST_F(ReshapeDecomposerTest, IsBitcast) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[8]{0} parameter(0) ROOT r = f32[4,2]{1,0} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, AlignableOutput) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[8,3]{1,0} parameter(0) ROOT r = f32[4,2,3]{0,1,2} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, AlignableInput) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3]{0,1,2} parameter(0) ROOT r = f32[8,3]{1,0} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); } TEST_F(ReshapeDecomposerTest, NotAlignable) { const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3,8]{0,2,1,3} parameter(0) ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p) } )"; CheckReshapeDecomposer(hlo, R"( )"); }
#include "tensorflow/core/grappler/optimizers/data/make_deterministic.h" #include <algorithm> #include <utility> #include "absl/container/flat_hash_set.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/grappler/clusters/cluster.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/mutable_graph_view.h" #include "tensorflow/core/grappler/op_types.h" #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h" #include "tensorflow/core/grappler/optimizers/data/function_utils.h" #include "tensorflow/core/grappler/optimizers/data/graph_utils.h" #include "tensorflow/core/grappler/optimizers/data/split_utils.h" #include "tensorflow/core/grappler/utils.h" namespace tensorflow { namespace grappler { namespace { constexpr char kInterleaveOp[] = "InterleaveDataset"; constexpr char kParallelInterleaveOp[] = "ParallelInterleaveDataset"; constexpr char kLegacyParallelInterleaveOp[] = "LegacyParallelInterleaveDatasetV2"; constexpr char kMapOp[] = "MapDataset"; constexpr char kParallelMapOp[] = "ParallelMapDataset"; constexpr char kParallelMapOpV2[] = "ParallelMapDatasetV2"; constexpr char kMapAndBatchOp[] = "MapAndBatchDataset"; constexpr char kBatchOp[] = "BatchDataset"; constexpr char kBatchV2Op[] = "BatchDatasetV2"; constexpr char kParallelBatchOp[] = "ParallelBatchDataset"; constexpr char kPrefetchOp[] = "PrefetchDataset"; constexpr std::array<const char*, 9> kDeterministicStatefulOps = { "TextLineDataset", "FixedLengthRecordDataset", "TFRecordDataset", "TensorSliceDataset", "RangeDataset", "SSTableDataset", "RecordIODataset", "Print", "Assert"}; constexpr std::array<const char*, 13> kDeterministicStatefulOpsWhenAsync = { "RandomUniform", "RandomUniformInt", "RandomStandardNormal", "ParameterizedTruncatedNormal", "TruncatedNormal", "RandomShuffle", "Multinomial", "RandomGamma", "RandomGammaGrad", "RandomPoisson", "RandomCrop", "SampleDistortedBoundingBox", "SampleDistortedBoundingBoxV2"}; bool IsDeterministicWhenRunInParallel(const std::string& stateful_op) { for (auto op_in_array : kDeterministicStatefulOps) { if (data::MatchesAnyVersion(op_in_array, stateful_op)) { return true; } } return false; } bool IsDeterministicWhenRunAsynchronously(const std::string& stateful_op) { for (auto op_in_array : kDeterministicStatefulOps) { if (data::MatchesAnyVersion(op_in_array, stateful_op)) { return true; } } for (auto op_in_array : kDeterministicStatefulOpsWhenAsync) { if (data::MatchesAnyVersion(op_in_array, stateful_op)) { return true; } } return false; } bool IsParallelInterleave(const std::string& op) { return data::MatchesAnyVersion(kParallelInterleaveOp, op) || op == kLegacyParallelInterleaveOp; } bool IsParallelMap(const std::string& op) { return data::MatchesAnyVersion(kParallelMapOp, op); } bool IsParallelBatch(const std::string& op) { return data::MatchesAnyVersion(kParallelBatchOp, op); } bool IsMapAndBatch(const std::string& op) { return data::MatchesAnyVersion(kMapAndBatchOp, op); } bool IsPrefetch(const std::string& op) { return data::MatchesAnyVersion(kPrefetchOp, op); } bool IntroducesFunctionParallelism(const std::string& op) { return IsParallelInterleave(op) || IsParallelMap(op) || IsMapAndBatch(op); } bool IntroducesAsynchrony(const std::string& op) { return IntroducesFunctionParallelism(op) || IsPrefetch(op) || IsParallelBatch(op); } 
absl::flat_hash_map<absl::string_view, const NodeDef*> NameToNode( const FunctionDef& function) { absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node; for (const NodeDef& node : function.node_def()) { name_to_node.insert({node.name(), &node}); } return name_to_node; } NodeDef* GetMutableNode(const string& node_name, MutableGraphView* graph) { int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph()); DCHECK_NE(index, -1) << "Failed to find node " << node_name << " in the optimized graph."; return graph->graph()->mutable_node(index); } Status ConvertMapOrInterleave(const string& node_name, MutableGraphView* graph) { NodeDef* node = GetMutableNode(node_name, graph); auto Targuments = node->attr().find("Targuments"); if (Targuments == node->attr().end()) { return errors::Internal("Failed to find Targuments attribute for node ", node_name); } int num_inputs_after_rewrite; if (IsParallelInterleave(node->op())) { node->set_op(kInterleaveOp); num_inputs_after_rewrite = 3 + Targuments->second.list().type_size(); } else { DCHECK(IsParallelMap(node->op())); node->set_op(kMapOp); num_inputs_after_rewrite = 1 + Targuments->second.list().type_size(); } int inputs_processed = 0; for (int i = 0; i < node->input_size(); i++) { std::string input = node->input(i); if (IsControlInput(input)) { continue; } if (inputs_processed >= num_inputs_after_rewrite) { node->set_input(i, absl::StrCat("^", input)); } inputs_processed++; } if (inputs_processed < num_inputs_after_rewrite) { return errors::Internal("Found only ", inputs_processed, " inputs to node ", node_name, ", but expected to find at least ", num_inputs_after_rewrite); } node->mutable_attr()->erase("deterministic"); node->mutable_attr()->erase("sloppy"); return absl::OkStatus(); } absl::flat_hash_set<absl::string_view> GetAllTransitiveDependencies( const FunctionDef& function_def, const absl::flat_hash_set<absl::string_view>& nodes) { std::vector<absl::string_view> nodes_to_process; std::copy(nodes.begin(), nodes.end(), std::back_inserter(nodes_to_process)); absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node = NameToNode(function_def); absl::flat_hash_set<absl::string_view> dependencies; while (!nodes_to_process.empty()) { absl::string_view node_name = nodes_to_process.back(); nodes_to_process.pop_back(); if (dependencies.contains(node_name)) { continue; } dependencies.insert(node_name); auto iter = name_to_node.find(node_name); if (iter == name_to_node.end()) { continue; } for (absl::string_view inp : iter->second->input()) { absl::string_view inp_node = inp.substr(0, inp.find(':')); if (inp_node.at(0) == '^') { inp_node = inp_node.substr(1); } if (name_to_node.contains(inp_node)) { nodes_to_process.push_back(inp_node); } } } return dependencies; } Status SplitMap( const FunctionLibraryDefinition& library, const string& map_node_name, MutableGraphView* graph, const absl::flat_hash_set<absl::string_view>& nondeterministic_nodes) { NodeDef* map_node = GetMutableNode(map_node_name, graph); NameAttrList func = map_node->attr().at("f").func(); const FunctionDef* function_def = library.Find(func.name()); if (!function_def) { return errors::Internal("Could not look up function ", func.name(), " in FunctionLibraryDefinition"); } absl::flat_hash_set<absl::string_view> nodes_to_move = GetAllTransitiveDependencies(*function_def, nondeterministic_nodes); VLOG(2) << "Will move nodes to nonparallel function: " << absl::StrJoin(nodes_to_move, ", "); int64_t num_captured_arguments = 
map_node->attr().find("Targuments")->second.list().type_size(); TF_ASSIGN_OR_RETURN( split_utils::SplitResults split_results, split_utils::SplitFunction(*function_def, nodes_to_move, num_captured_arguments, library)); if (split_results.first_function_output_types.empty()) { return errors::Unimplemented( "The case where the first function has no outputs is unimplemented."); } bool is_map_and_batch = map_node->op() == kMapAndBatchOp; NodeDef* first_map_node_ptr; { NodeDef first_map_node; graph_utils::SetUniqueGraphNodeName( strings::StrCat("make_deterministic_sequential_map/", map_node->name()), graph->graph(), &first_map_node); first_map_node.set_op(kMapOp); int num_control_deps = NumControlInputs(*map_node); int num_extra_inputs = is_map_and_batch ? 3 : 1; int control_deps_index = map_node->input_size() - num_control_deps; int extra_inputs_index = control_deps_index - num_extra_inputs; for (int i = 0; i < extra_inputs_index; i++) { DCHECK(!IsControlInput(map_node->input(i))); first_map_node.add_input(map_node->input(i)); } for (int i = extra_inputs_index; i < control_deps_index; i++) { DCHECK(!IsControlInput(map_node->input(i))); first_map_node.add_input(absl::StrCat("^", map_node->input(i))); } for (int i = control_deps_index; i < map_node->input_size(); i++) { DCHECK(IsControlInput(map_node->input(i))); first_map_node.add_input(map_node->input(i)); } NameAttrList* name_attr_list = (*first_map_node.mutable_attr())["f"].mutable_func(); name_attr_list->set_name(split_results.first_function.signature().name()); graph_utils::CopyAttribute("Targuments", *map_node, &first_map_node); for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) { if (gtl::FindOrNull(map_node->attr(), key)) { graph_utils::CopyAttribute(key, *map_node, &first_map_node); } } AddNodeAttr("output_types", split_results.first_function_output_types, &first_map_node); TensorShapeProto unknown_shape; unknown_shape.set_unknown_rank(true); std::vector<TensorShapeProto> output_shapes( split_results.first_function_output_types.size(), unknown_shape); AddNodeAttr("output_shapes", output_shapes, &first_map_node); first_map_node_ptr = graph->AddNode(std::move(first_map_node)); } NodeDef* second_map_node_ptr; { NodeDef second_map_node; string node_name = map_node->op() == kMapAndBatchOp ? 
"map_and_batch" : "parallel_map"; graph_utils::SetUniqueGraphNodeName( strings::StrCat("make_deterministic_parallel_", node_name, "/", map_node->name()), graph->graph(), &second_map_node); second_map_node.set_op(map_node->op()); second_map_node.add_input(first_map_node_ptr->name()); for (int i = 1; i < map_node->input_size(); i++) { second_map_node.add_input(map_node->input(i)); } NameAttrList* name_attr_list = (*second_map_node.mutable_attr())["f"].mutable_func(); name_attr_list->set_name(split_results.second_function.signature().name()); graph_utils::CopyAttribute("Targuments", *map_node, &second_map_node); graph_utils::CopyAttribute("output_types", *map_node, &second_map_node); graph_utils::CopyAttribute("output_shapes", *map_node, &second_map_node); if (!is_map_and_batch) { AddNodeAttr("deterministic", "true", &second_map_node); } for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) { if (gtl::FindOrNull(map_node->attr(), key)) { graph_utils::CopyAttribute(key, *map_node, &second_map_node); } } second_map_node_ptr = graph->AddNode(std::move(second_map_node)); } TF_RETURN_IF_ERROR( graph->UpdateFanouts(map_node->name(), second_map_node_ptr->name())); *graph->graph()->mutable_library()->mutable_function()->Add() = split_results.first_function; *graph->graph()->mutable_library()->mutable_function()->Add() = split_results.second_function; return absl::OkStatus(); } Status ConvertBatch(const string& node_name, MutableGraphView* graph) { NodeDef* node = GetMutableNode(node_name, graph); node->set_op(kBatchV2Op); std::string num_parallel_calls_input = node->input(2); node->set_input(2, node->input(3)); node->set_input(3, absl::StrCat("^", num_parallel_calls_input)); node->mutable_attr()->erase("deterministic"); return absl::OkStatus(); } Status ConvertMapAndBatch(const string& node_name, MutableGraphView* graph) { int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph()); DCHECK_NE(index, -1) << "Failed to find node " << node_name << " in the optimized graph."; const NodeDef& orig_node = graph->graph()->node(index); auto Targuments = orig_node.attr().find("Targuments"); if (Targuments == orig_node.attr().end()) { return errors::Internal("Failed to find Targuments attribute for node ", node_name); } NodeDef new_map_node; new_map_node.set_op(kMapOp); graph_utils::SetUniqueGraphNodeName(kMapOp, graph->graph(), &new_map_node); int num_map_inputs = 1 + Targuments->second.list().type_size(); for (int i = 0; i < num_map_inputs; i++) { new_map_node.add_input(orig_node.input(i)); } for (int i = num_map_inputs; i < orig_node.input_size(); i++) { if (IsControlInput(orig_node.input(i))) { new_map_node.add_input(orig_node.input(i)); } else { new_map_node.add_input(absl::StrCat("^", orig_node.input(i))); } } for (auto key : {"f", "Targuments", "output_types"}) { graph_utils::CopyAttribute(key, orig_node, &new_map_node); } for (auto key : {"preserve_cardinality"}) { if (gtl::FindOrNull(new_map_node.attr(), key)) { graph_utils::CopyAttribute(key, orig_node, &new_map_node); } } auto orig_output_shapes = orig_node.attr().find("output_shapes"); if (orig_output_shapes == orig_node.attr().end()) { return errors::Internal("Failed to find output_shapes attribute for node ", node_name); } AttrValue& map_output_shapes = (*new_map_node.mutable_attr())["output_shapes"]; for (const TensorShapeProto& orig_shape : orig_output_shapes->second.list().shape()) { TensorShapeProto* new_shape = map_output_shapes.mutable_list()->add_shape(); if (orig_shape.unknown_rank()) { 
new_shape->set_unknown_rank(true); } else if (orig_shape.dim_size() == 0) { return errors::Internal( "Output shape of MapAndBatch node cannot be scalar"); } else { for (int i = 1; i < orig_shape.dim_size(); i++) { *new_shape->add_dim() = orig_shape.dim(i); } } } NodeDef new_batch_node; new_batch_node.set_op(kBatchV2Op); graph_utils::SetUniqueGraphNodeName(kBatchOp, graph->graph(), &new_batch_node); new_batch_node.add_input(new_map_node.name()); new_batch_node.add_input(orig_node.input(num_map_inputs)); new_batch_node.add_input( orig_node.input(num_map_inputs + 2)); graph_utils::CopyShapesAndTypesAttrs(orig_node, &new_batch_node); graph->AddNode(std::move(new_map_node)); NodeDef* graph_batch_node = graph->AddNode(std::move(new_batch_node)); TF_RETURN_IF_ERROR( graph->UpdateFanouts(orig_node.name(), graph_batch_node->name())); return absl::OkStatus(); } Status ConvertPrefetch(const string& node_name, MutableGraphView* graph) { NodeDef* node = GetMutableNode(node_name, graph); constexpr int buffer_size_index = 1; node->add_input(absl::StrCat("^", node->input(buffer_size_index))); NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(0, graph); node->set_input(buffer_size_index, tmp->name()); return absl::OkStatus(); } enum class NondeterminismType { PARALLELISM, ASYNCHRONY }; bool IsDeterministicStatefulOp(NondeterminismType type, const std::string& stateful_op) { return type == NondeterminismType::PARALLELISM ? IsDeterministicWhenRunInParallel(stateful_op) : IsDeterministicWhenRunAsynchronously(stateful_op); } bool FunctionNodeMayIntroduceNondeterminism( const FunctionLibraryDefinition& library, const NodeDef& node_def, NondeterminismType nondeterminism_type, absl::flat_hash_set<std::string>* functions_processed); bool FunctionMayIntroduceNondeterminism( const FunctionLibraryDefinition& library, const std::string& function_name, NondeterminismType nondeterminism_type, absl::flat_hash_set<std::string>* functions_processed, absl::flat_hash_set<absl::string_view>* nondeterministic_nodes) { if (functions_processed->contains(function_name)) { return false; } functions_processed->insert(function_name); const FunctionDef* function_def = library.Find(function_name); if (!function_def) { VLOG(2) << "Could not look up function " << function_name << " in FunctionLibraryDefinition, so rewriting op to be safe"; return true; } bool found = false; for (const NodeDef& node_def : function_def->node_def()) { bool nondeterministic = FunctionNodeMayIntroduceNondeterminism( library, node_def, nondeterminism_type, functions_processed); if (nondeterministic) { if (nondeterministic_nodes) { nondeterministic_nodes->insert(node_def.name()); found = true; } else { return true; } } } return found; } bool FunctionMayIntroduceNondeterminism( const FunctionLibraryDefinition& library, const std::string& function_name, NondeterminismType nondeterminism_type) { absl::flat_hash_set<string> functions_processed; return FunctionMayIntroduceNondeterminism(library, function_name, nondeterminism_type, &functions_processed, nullptr); } bool FunctionNodeMayIntroduceNondeterminism( const FunctionLibraryDefinition& library, const NodeDef& node_def, NondeterminismType nondeterminism_type, absl::flat_hash_set<std::string>* functions_processed) { const OpRegistrationData* op_reg_data = nullptr; Status s = library.LookUp(node_def.op(), &op_reg_data); if (!s.ok()) { VLOG(2) << "Could not look up op " << node_def.op() << " in FunctionLibraryDefinition, so rewriting op to be safe"; return true; } bool is_function_op = 
op_reg_data->is_function_op; bool is_stateful = false; if (!is_function_op) { const OpDef* op_def; s = OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def); if (!s.ok()) { VLOG(2) << "Could not look up op " << node_def.op() << " in OpRegistry, so rewriting op to be safe"; return true; } is_stateful = op_def->is_stateful(); } if (is_stateful && !IsStatefulPartitionedCall((node_def)) && !IsIf(node_def) && !IsWhile(node_def) && !IsDeterministicStatefulOp(nondeterminism_type, node_def.op())) { VLOG(2) << "Will rewrite due to op: " << node_def.op(); return true; } std::vector<std::string> attr_func_names; for (const auto& attr : node_def.attr()) { if (attr.second.has_func()) { attr_func_names.push_back(attr.second.func().name()); } for (const auto& name_attr_list : attr.second.list().func()) { attr_func_names.push_back(name_attr_list.name()); } } if (is_function_op) { attr_func_names.push_back(node_def.op()); } for (const std::string& inner_function_name : attr_func_names) { if (FunctionMayIntroduceNondeterminism(library, inner_function_name, nondeterminism_type, functions_processed, nullptr)) { return true; } } return false; } bool NodeMayIntroduceNondeterminismWhenAsync( const FunctionLibraryDefinition& library, const NodeDef& node) { const OpDef* op_def; Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def); if (s.code() == error::NOT_FOUND) { return false; } else if (!s.ok()) { return true; } if (data::DatasetOpKernel::IsDatasetOp(*op_def)) { std::vector<std::string> attr_func_names; for (const auto& attr : node.attr()) { if (attr.second.has_func()) { attr_func_names.push_back(attr.second.func().name()); } for (const auto& name_attr_list : attr.second.list().func()) { attr_func_names.push_back(name_attr_list.name()); } } for (const std::string& inner_function_name : attr_func_names) { if (FunctionMayIntroduceNondeterminism(library, inner_function_name, NondeterminismType::ASYNCHRONY)) { return true; } } } return false; } bool GraphMayHaveAsyncNondeterminism(const FunctionLibraryDefinition& library, const GraphDef& graph) { for (const NodeDef& node : graph.node()) { if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) { return true; } } for (const string& function_name : library.ListFunctionNames()) { const FunctionDef* function_def = library.Find(function_name); CHECK(function_def); for (const NodeDef& node : function_def->node_def()) { if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) { return true; } } } return false; } } Status MakeDeterministic::OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item, GraphDef* output, OptimizationStats* stats) { *output = item.graph; MutableGraphView graph(output); FunctionLibraryDefinition function_library(OpRegistry::Global(), item.graph.library()); absl::flat_hash_set<string> nodes_to_delete; bool remove_async_nodes = GraphMayHaveAsyncNondeterminism(function_library, item.graph); for (const NodeDef& node : item.graph.node()) { if (graph_utils::HasSloppyAttr(node.op())) { NodeDef* mutable_node = GetMutableNode(node.name(), &graph); (*mutable_node->mutable_attr())["sloppy"].set_b(false); stats->num_changes++; } if (graph_utils::HasDeterministicAttr(node.op())) { NodeDef* mutable_node = GetMutableNode(node.name(), &graph); (*mutable_node->mutable_attr())["deterministic"].set_s("true"); stats->num_changes++; } bool rewrite_due_to_async = IntroducesAsynchrony(node.op()) && remove_async_nodes; absl::flat_hash_set<std::string> functions_processed; absl::flat_hash_set<absl::string_view> nondeterministic_nodes; 
bool rewrite_due_to_parallelism = IntroducesFunctionParallelism(node.op()) && FunctionMayIntroduceNondeterminism( function_library, node.attr().at("f").func().name(), NondeterminismType::PARALLELISM, &functions_processed, &nondeterministic_nodes); if (!rewrite_due_to_async && !rewrite_due_to_parallelism) { continue; } VLOG(1) << "Rewriting node " << node.name() << " (" << node.op() << ") because it introduces nondeterminism through " << (rewrite_due_to_async ? "asynchrony" : "parallelism"); bool maybe_can_split = !rewrite_due_to_async && (node.op() == kParallelMapOpV2 || IsMapAndBatch(node.op())); if (maybe_can_split) { Status s = SplitMap(function_library, node.name(), &graph, nondeterministic_nodes); if (s.ok()) { VLOG(1) << "Split node " << node.name() << " (" << node.op() << ") into two map nodes: a nonparallel version and a " "parallel version."; nodes_to_delete.insert(node.name()); continue; } else if (s.code() == error::UNIMPLEMENTED) { VLOG(1) << "Could not move stateful ops to their own function, so will " "convert node " << node.name() << " to a nonparallel version instead. Reason: " << s; } else { return s; } } if (IsPrefetch(node.op())) { TF_RETURN_IF_ERROR(ConvertPrefetch(node.name(), &graph)); } else if (IsMapAndBatch(node.op())) { TF_RETURN_IF_ERROR(ConvertMapAndBatch(node.name(), &graph)); nodes_to_delete.insert(node.name()); } else if (IsParallelBatch(node.op())) { TF_RETURN_IF_ERROR(ConvertBatch(node.name(), &graph)); } else { DCHECK(IsParallelInterleave(node.op()) || IsParallelMap(node.op())); TF_RETURN_IF_ERROR(ConvertMapOrInterleave(node.name(), &graph)); } stats->num_changes++; } TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete)); return absl::OkStatus(); } REGISTER_GRAPH_OPTIMIZER_AS(MakeDeterministic, "make_deterministic"); } }
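GetAllTransitiveDependencies in the pass above is a plain worklist walk over a FunctionDef's node graph; SplitMap then moves that closure of nodes into a sequential MapDataset. The traversal in a self-contained form, with simplified names and the '^'/output-suffix stripping omitted (illustrative only):

#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Transitive closure of the seed nodes in `worklist` over an input-edge map
// (node name -> names of its inputs).
std::unordered_set<std::string> TransitiveDeps(
    const std::unordered_map<std::string, std::vector<std::string>>& inputs,
    std::vector<std::string> worklist) {
  std::unordered_set<std::string> deps;
  while (!worklist.empty()) {
    std::string node = worklist.back();
    worklist.pop_back();
    if (!deps.insert(node).second) continue;  // already visited
    auto it = inputs.find(node);
    if (it == inputs.end()) continue;  // e.g. a function argument
    for (const std::string& inp : it->second) worklist.push_back(inp);
  }
  return deps;
}

int main() {
  // stateful depends on i2, which depends on i1: all three must move.
  std::unordered_map<std::string, std::vector<std::string>> inputs = {
      {"stateful", {"i2"}}, {"i2", {"i1"}}, {"i1", {}}};
  auto deps = TransitiveDeps(inputs, {"stateful"});
  return deps.size() == 3 ? 0 : 1;
}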
#include "tensorflow/core/grappler/optimizers/data/make_deterministic.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h" #include "tensorflow/core/grappler/optimizers/data/graph_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace grappler { namespace { std::vector<string> GetNodeNames(const FunctionDef& func) { std::vector<string> node_names; for (const NodeDef& node : func.node_def()) { node_names.push_back(node.name()); } return node_names; } class SplitMapTest : public ::testing::TestWithParam<std::tuple<bool, bool>> {}; TEST_P(SplitMapTest, SplitMapFunction) { using test::function::NDef; GrapplerItem item; bool deterministic, rewrite_map_and_batch; std::tie(deterministic, rewrite_map_and_batch) = GetParam(); if (deterministic && rewrite_map_and_batch) { LOG(INFO) << "Skipping test because MapAndBatch does not have " "'deterministic' attribute"; return; } FunctionDef orig_func_def = FunctionDefHelper::Create( "MyFunction", {"a1: float", "a2: float", "a3: double"}, {"o1: float", "o2: double"}, {}, { {{"i1"}, "Identity", {"a2"}, {{"T", DT_FLOAT}}}, {{"i2"}, "Identity", {"i1:output"}, {{"T", DT_FLOAT}}}, {{"stateful"}, "SampleDistortedBoundingBox", {"a1", "i2:output"}, {{"T", DT_FLOAT}}}, {{"i3"}, "Identity", {"stateful:bboxes:0"}, {{"T", DT_FLOAT}}}, {{"i4"}, "Identity", {"a3"}, {{"T", DT_DOUBLE}}}, }, {{"o1", "i3:output"}, {"o2", "i4:output"}}); NodeDef orig_map_node_def; if (rewrite_map_and_batch) { orig_map_node_def = graph_tests_utils::MakeMapAndBatchNode( "map", "range", "batch_size", "num_parallel_calls", "drop_remainder", "MyFunction"); } else { orig_map_node_def = graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", "MyFunction", deterministic ? 
"true" : "false"); } orig_map_node_def.add_input("^start"); AttrValue* attr_val = &(*orig_map_node_def.mutable_attr())["Targuments"]; SetAttrValue(std::vector<DataType>{DT_DOUBLE}, attr_val); (*orig_map_node_def.mutable_attr())["preserve_cardinality"].set_b(true); attr_val = &(*orig_map_node_def.mutable_attr())["output_types"]; SetAttrValue(std::vector<DataType>{DT_FLOAT, DT_DOUBLE}, attr_val); attr_val = &(*orig_map_node_def.mutable_attr())["output_shapes"]; SetAttrValue(std::vector<TensorShape>{{1}, {1}}, attr_val); item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), orig_map_node_def}, {orig_func_def}); MakeDeterministic optimizer; GraphDef output; VLOG(1) << "GraphDef before optimization:\n" << item.graph.DebugString() << "\n\n"; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); VLOG(1) << "GraphDef after optimization:\n" << output.DebugString() << "\n\n"; int index = graph_utils::FindGraphNodeWithOp("MapDataset", output); ASSERT_GE(index, 0); NodeDef first_map_node_def = output.node(index); if (rewrite_map_and_batch) { ASSERT_THAT( first_map_node_def.input(), ::testing::ElementsAre("range", "^batch_size", "^num_parallel_calls", "^drop_remainder", "^start")); } else { ASSERT_THAT( first_map_node_def.input(), ::testing::ElementsAre("range", "^num_parallel_calls", "^start")); } std::vector<DataType> t_arguments; TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "Targuments", &t_arguments)); ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE)); std::vector<DataType> output_types; TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "output_types", &output_types)); ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT)); std::vector<TensorShapeProto> output_shapes; TF_ASSERT_OK( GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes)); for (const TensorShapeProto& shape : output_shapes) { ASSERT_TRUE(shape.unknown_rank()); } bool preserve_cardinality; TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality", &preserve_cardinality)); ASSERT_TRUE(preserve_cardinality); NameAttrList f; TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "f", &f)); ASSERT_EQ(f.attr_size(), 0); index = graph_utils::FindGraphFunctionWithName(f.name(), output.library()); CHECK_GE(index, 0); FunctionDef first_func = output.library().function(index); ASSERT_TRUE(first_func.signature().is_stateful()); ASSERT_THAT(GetNodeNames(first_func), ::testing::UnorderedElementsAre("i1", "i2", "stateful")); NodeDef second_map_node_def; if (rewrite_map_and_batch) { index = graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output); CHECK_GE(index, 0); second_map_node_def = output.node(index); ASSERT_THAT(second_map_node_def.input(), ::testing::ElementsAre(first_map_node_def.name(), "batch_size", "num_parallel_calls", "drop_remainder", "^start")); } else { index = graph_utils::FindGraphNodeWithOp("ParallelMapDatasetV2", output); CHECK_GE(index, 0); second_map_node_def = output.node(index); ASSERT_THAT(second_map_node_def.input(), ::testing::ElementsAre(first_map_node_def.name(), "num_parallel_calls", "^start")); ASSERT_EQ(second_map_node_def.attr().at("deterministic").s(), "true"); } t_arguments.clear(); 
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "Targuments", &t_arguments)); ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE)); output_types.clear(); TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "output_types", &output_types)); ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT, DT_DOUBLE)); output_shapes.clear(); TF_ASSERT_OK( GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes)); for (const TensorShapeProto& shape : output_shapes) { ASSERT_EQ(shape.dim_size(), 0); } TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality", &preserve_cardinality)); ASSERT_TRUE(preserve_cardinality); TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "f", &f)); ASSERT_EQ(f.attr_size(), 0); index = graph_utils::FindGraphFunctionWithName(f.name(), output.library()); CHECK_GE(index, 0); FunctionDef second_func = output.library().function(index); ASSERT_THAT(GetNodeNames(second_func), ::testing::UnorderedElementsAre("i3", "i4")); } INSTANTIATE_TEST_SUITE_P(Test, SplitMapTest, ::testing::Combine(::testing::Bool(), ::testing::Bool())); FunctionDef OuterXTimesTwo() { return FunctionDefHelper::Define( "OuterXTimesTwo", {"x: float"}, {"y: float"}, {}, {{{"y"}, "PartitionedCall", {"x"}, {{"Tin", DataTypeSlice{DT_FLOAT}}, {"Tout", DataTypeSlice{DT_FLOAT}}, {"f", FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}}}}); } FunctionDef OuterRandomUniform() { return FunctionDefHelper::Define( "OuterRandomUniform", {"x: float"}, {"random_uniform: int64"}, {}, {{{"random_uniform"}, "StatefulPartitionedCall", {"x"}, {{"Tin", DataTypeSlice{DT_FLOAT}}, {"Tout", DataTypeSlice{DT_INT64}}, {"f", FunctionDefHelper::FunctionRef("RandomUniformFn", {{"T", DT_FLOAT}})}}}}); } FunctionDef OuterReadResourceVariable() { return FunctionDefHelper::Define( "OuterReadResourceVariable", {"x: resource"}, {"y: float"}, {}, {{{"y"}, "StatefulPartitionedCall", {"x"}, {{"Tin", DataTypeSlice{DT_RESOURCE}}, {"Tout", DataTypeSlice{DT_FLOAT}}, {"f", FunctionDefHelper::FunctionRef("ReadResourceVariable", {})}}}}); } class MakeDeterministicTest : public ::testing::TestWithParam<std::tuple<bool, bool>> {}; TEST_P(MakeDeterministicTest, NoRewriteInterleave) { using test::function::NDef; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? 
"OuterXTimesTwo" : "XTimesTwo"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("num_parallel_calls", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelInterleaveV2Node( "interleave", "range", "cycle_length", "block_length", "num_parallel_calls", func_name, !deterministic)}, {test::function::XTimesTwo(), OuterXTimesTwo()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("interleave", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "ParallelInterleaveDatasetV2"); ASSERT_EQ(node_def.attr().at("sloppy").b(), false); } TEST_P(MakeDeterministicTest, NoRewriteMap) { using test::function::NDef; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", func_name, deterministic ? "true" : "false")}, {test::function::XTimesTwo(), OuterXTimesTwo()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("map", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "ParallelMapDatasetV2"); ASSERT_EQ(node_def.attr().at("deterministic").s(), "true"); } TEST_P(MakeDeterministicTest, NoRewriteBatch) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("drop_remainder", "Const", {}, {{"value", false}, {"dtype", DT_BOOL}}), graph_tests_utils::MakeMapNode("map", "range", func_name), graph_tests_utils::MakeParallelBatchNode( "batch", "map", "batch_size", "num_parallel_calls", "drop_remainder", deterministic ? 
"true" : "false")}, {test::function::RandomUniform(), OuterRandomUniform()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("batch", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "ParallelBatchDataset"); ASSERT_EQ(node_def.attr().at("deterministic").s(), "true"); } TEST_P(MakeDeterministicTest, NoRewritePrefetch) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("buffer_size", "Const", {}, {{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}), graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", func_name, deterministic ? "true" : "false"), graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")}, {test::function::RandomUniform(), OuterRandomUniform()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("prefetch", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "PrefetchDataset"); ASSERT_EQ(node_def.input_size(), 2); ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map")); ASSERT_EQ(node_def.input(1), "buffer_size"); NodeDef buffer_size = output.node(graph_utils::FindGraphNodeWithName("buffer_size", output)); EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 1); } TEST_P(MakeDeterministicTest, RewriteInterleave) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? 
"OuterRandomUniform" : "RandomUniformFn"; NodeDef interleave_node_def = graph_tests_utils::MakeParallelInterleaveV2Node( "interleave", "range", "cycle_length", "block_length", "num_parallel_calls", func_name, !deterministic); interleave_node_def.add_input("^start"); item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), interleave_node_def}, {test::function::RandomUniform(), OuterRandomUniform()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithOp("InterleaveDataset", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.input_size(), 5); ASSERT_EQ(node_def.input(0), "range"); ASSERT_EQ(node_def.input(1), "cycle_length"); ASSERT_EQ(node_def.input(2), "block_length"); ASSERT_EQ(node_def.input(3), "^num_parallel_calls"); ASSERT_EQ(node_def.input(4), "^start"); } enum CannotSplitReason { FUNC_HAS_ATTR, ASYNC_NONDETERMINISM }; class RewriteMapWithoutSplitTest : public ::testing::TestWithParam< std::tuple<bool, bool, CannotSplitReason>> {}; TEST_P(RewriteMapWithoutSplitTest, RewriteMapWithoutSplit) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; CannotSplitReason reason; std::tie(nest, deterministic, reason) = GetParam(); FunctionDef func; FunctionDef outer_func; if (reason == FUNC_HAS_ATTR) { func = test::function::RandomUniform(); (*func.mutable_attr())["test_attr"].set_s("test_value"); outer_func = OuterRandomUniform(); (*outer_func.mutable_attr())["test_attr"].set_s("test_value"); } else { func = test::function::ReadResourceVariable(); outer_func = OuterReadResourceVariable(); } std::string func_name = nest ? outer_func.signature().name() : func.signature().name(); NodeDef map_node_def = graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", func_name, deterministic ? 
"true" : "false"); map_node_def.add_input("^start"); item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), map_node_def}, {func, outer_func}); VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n"; MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithOp("MapDataset", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.input_size(), 3); ASSERT_EQ(node_def.input(0), "range"); ASSERT_EQ(node_def.input(1), "^num_parallel_calls"); ASSERT_EQ(node_def.input(2), "^start"); NameAttrList f; TF_ASSERT_OK(GetNodeAttr(node_def, "f", &f)); ASSERT_EQ(f.name(), func_name); ASSERT_FALSE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output)); } TEST_P(MakeDeterministicTest, RewriteBatch) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? "OuterReadResourceVariable" : "ReadResourceVariable"; NodeDef batch_node_def = graph_tests_utils::MakeParallelBatchNode( "batch", "map", "batch_size", "num_parallel_calls", "drop_remainder", deterministic ? "true" : "false"); batch_node_def.add_input("^start"); item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("drop_remainder", "Const", {}, {{"value", false}, {"dtype", DT_BOOL}}), graph_tests_utils::MakeMapNode("map", "range", func_name), batch_node_def}, {test::function::ReadResourceVariable(), OuterReadResourceVariable()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.input_size(), 5); ASSERT_EQ(node_def.input(0), "map"); ASSERT_EQ(node_def.input(1), "batch_size"); ASSERT_EQ(node_def.input(2), "drop_remainder"); ASSERT_EQ(node_def.input(3), "^num_parallel_calls"); ASSERT_EQ(node_def.input(4), "^start"); ASSERT_EQ(node_def.attr().count("deterministic"), 0); } TEST_P(MakeDeterministicTest, RewritePrefetch) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? 
"OuterReadResourceVariable" : "ReadResourceVariable"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("buffer_size", "Const", {}, {{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}), graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", func_name, deterministic ? "true" : "false"), graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")}, {test::function::ReadResourceVariable(), OuterReadResourceVariable()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("prefetch", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "PrefetchDataset"); ASSERT_EQ(node_def.input_size(), 3); ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map")); ASSERT_EQ(node_def.input(2), "^buffer_size"); NodeDef buffer_size = output.node( graph_utils::FindGraphNodeWithName(node_def.input(1), output)); EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 0); } INSTANTIATE_TEST_SUITE_P(Test, MakeDeterministicTest, ::testing::Combine(::testing::Bool(), ::testing::Bool())); INSTANTIATE_TEST_SUITE_P( Test, RewriteMapWithoutSplitTest, ::testing::Combine(::testing::Bool(), ::testing::Bool(), ::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM))); TEST(NoRewriteMapAndBatchTest, NoRewriteMapAndBatch) { using test::function::NDef; GrapplerItem item; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}), NDef("drop_remainder", "Const", {}, {{"value", false}, {"dtype", DT_BOOL}}), graph_tests_utils::MakeMapAndBatchNode( "map_and_batch", "range", "batch_size", "num_parallel_calls", "drop_remainder", "XTimesTwo")}, {test::function::XTimesTwo()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("map_and_batch", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.input_size(), 4); ASSERT_EQ(node_def.input(0), "range"); ASSERT_EQ(node_def.input(1), "batch_size"); ASSERT_EQ(node_def.input(2), "num_parallel_calls"); ASSERT_EQ(node_def.input(3), "drop_remainder"); } class RewriteMapAndBatchWithoutSplitTest : public ::testing::TestWithParam<std::tuple<bool, CannotSplitReason>> {}; TEST_P(RewriteMapAndBatchWithoutSplitTest, RewriteMapAndBatchWithoutSplit) { using test::function::NDef; GrapplerItem item; bool nest; CannotSplitReason reason; std::tie(nest, reason) = GetParam(); FunctionDef func; if (reason == FUNC_HAS_ATTR) { func = test::function::RandomUniform(); (*func.mutable_attr())["test_attr"].set_s("test_value"); } else { func = test::function::ReadResourceVariable(); } NodeDef map_and_batch_node_def = graph_tests_utils::MakeMapAndBatchNode( "map_and_batch", "range", "batch_size", 
"num_parallel_calls", "drop_remainder", func.signature().name()); SetAttrValue( absl::Span<const PartialTensorShape>{ {2}, {-1, 3, -1}, PartialTensorShape()}, &(*map_and_batch_node_def.mutable_attr())["output_shapes"]); item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}), NDef("num_parallel_calls", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}), NDef("drop_remainder", "Const", {}, {{"value", false}, {"dtype", DT_BOOL}}), map_and_batch_node_def}, {func}); VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n"; MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); ASSERT_FALSE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output)); int index = graph_utils::FindGraphNodeWithOp("MapDataset", output); ASSERT_GE(index, 0); NodeDef map_node_def = output.node(index); ASSERT_EQ(map_node_def.input_size(), 4); ASSERT_EQ(map_node_def.input(0), "range"); ASSERT_EQ(map_node_def.input(1), "^batch_size"); ASSERT_EQ(map_node_def.input(2), "^num_parallel_calls"); ASSERT_EQ(map_node_def.input(3), "^drop_remainder"); ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("f"), map_node_def.attr().at("f"))); ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("Targuments"), map_node_def.attr().at("Targuments"))); ASSERT_TRUE( AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"), map_node_def.attr().at("output_types"))); ASSERT_EQ(map_node_def.attr().at("output_shapes").list().shape_size(), 3); ASSERT_TRUE(PartialTensorShape({}).IsIdenticalTo( map_node_def.attr().at("output_shapes").list().shape(0))); ASSERT_TRUE(PartialTensorShape({3, -1}).IsIdenticalTo( map_node_def.attr().at("output_shapes").list().shape(1))); ASSERT_TRUE(PartialTensorShape().IsIdenticalTo( map_node_def.attr().at("output_shapes").list().shape(2))); index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output); ASSERT_GE(index, 0); NodeDef batch_node_def = output.node(index); ASSERT_EQ(batch_node_def.input_size(), 3); ASSERT_EQ(batch_node_def.input(0), map_node_def.name()); ASSERT_EQ(batch_node_def.input(1), "batch_size"); ASSERT_EQ(batch_node_def.input(2), "drop_remainder"); ASSERT_TRUE( AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"), batch_node_def.attr().at("output_types"))); ASSERT_TRUE( AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_shapes"), batch_node_def.attr().at("output_shapes"))); } INSTANTIATE_TEST_SUITE_P( Test, RewriteMapAndBatchWithoutSplitTest, ::testing::Combine(::testing::Bool(), ::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM))); } } }
bool IsPrefetch(const std::string& op) { return data::MatchesAnyVersion(kPrefetchOp, op); }
TEST_P(MakeDeterministicTest, RewritePrefetch) { using test::function::NDef; typedef FunctionDefHelper FDH; GrapplerItem item; bool nest, deterministic; std::tie(nest, deterministic) = GetParam(); std::string func_name = nest ? "OuterReadResourceVariable" : "ReadResourceVariable"; item.graph = test::function::GDef( {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}), NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}), NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {}), NDef("num_parallel_calls", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("buffer_size", "Const", {}, {{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}), graph_tests_utils::MakeParallelMapV2Node( "map", "range", "num_parallel_calls", func_name, deterministic ? "true" : "false"), graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")}, {test::function::ReadResourceVariable(), OuterReadResourceVariable()}); MakeDeterministic optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); int index = graph_utils::FindGraphNodeWithName("prefetch", output); ASSERT_GE(index, 0); NodeDef node_def = output.node(index); ASSERT_EQ(node_def.op(), "PrefetchDataset"); ASSERT_EQ(node_def.input_size(), 3); ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map")); ASSERT_EQ(node_def.input(2), "^buffer_size"); NodeDef buffer_size = output.node( graph_utils::FindGraphNodeWithName(node_def.input(1), output)); EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 0); }
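The "^buffer_size" assertion above relies on GraphDef's control-input convention: prefixing an input name with '^' keeps the producer node alive and ordered without feeding its value, which is how the rewrites demote obsolete data inputs such as num_parallel_calls or the old buffer size. A tiny sketch of that convention, using a hypothetical helper:

#include <cassert>
#include <string>

// Turn a data input into a control input, GraphDef-style ("^name").
std::string ToControlInput(const std::string& input) {
  if (input.empty() || input[0] == '^') return input;  // already control
  return "^" + input;
}

int main() {
  assert(ToControlInput("buffer_size") == "^buffer_size");
  assert(ToControlInput("^start") == "^start");
  return 0;
}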
#include "xla/tools/xla_compile_lib.h" #include <cmath> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "google/protobuf/duration.pb.h" #include "absl/cleanup/cleanup.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/Parser/Parser.h" #include "stablehlo/dialect/Register.h" #include "xla/client/xla_computation.h" #include "xla/debug_options_flags.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/pjrt/mlir_to_hlo.h" #include "xla/service/compiler.h" #include "xla/service/cpu/cpu_compiler.h" #include "xla/service/cpu/cpu_executable.h" #include "xla/service/executable.h" #include "xla/service/export_hlo.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_module_config.h" #include "xla/service/symbol_repository.h" #include "xla/service/xla_compile_result.pb.h" #include "xla/shape.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tools/hlo_module_loader.h" #include "xla/util.h" #include "tsl/platform/env.h" #include "tsl/platform/env_time.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/status.h" #include "tsl/platform/status_to_from_proto.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "xla/service/gpu/autotuner_util.h" #include "xla/service/gpu/executable.pb.h" #include "xla/service/gpu/gpu_symbol_repository.h" #include "xla/stream_executor/gpu/gpu_init.h" #endif #if GOOGLE_CUDA #include "xla/service/gpu/nvptx_compiler.h" #elif TENSORFLOW_USE_ROCM #include "xla/service/gpu/amdgpu_compiler.h" #endif namespace xla { static absl::StatusOr<std::string> AotCompileCpuExecutable( std::unique_ptr<HloModule> hlo_module) { cpu::CpuCompiler cpu_compiler; auto module_group = std::make_unique<HloModuleGroup>(std::move(hlo_module)); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<Executable>> executables, cpu_compiler.Compile(std::move(module_group), {{nullptr}}, {nullptr})); TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result, cpu_compiler.Export(executables[0].get())); return aot_result->SerializeAsString(); } static absl::StatusOr<std::string> CompileGpuExecutable( std::unique_ptr<HloModule> hlo_module, std::optional<Compiler::TargetConfig> target_config, CompilationResult& result) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM const bool aot = target_config.has_value(); #if GOOGLE_CUDA auto gpu_compiler = gpu::NVPTXCompiler(); #elif TENSORFLOW_USE_ROCM auto gpu_compiler = gpu::AMDGPUCompiler(); #endif auto module_group = std::make_unique<HloModuleGroup>(std::move(hlo_module)); if (aot) { AotCompilationOptions aot_options(gpu_compiler.PlatformId()); aot_options.set_target_config(*target_config); aot_options.set_run_backend_only(true); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<AotCompilationResult>> aot_results, gpu_compiler.CompileAheadOfTime(std::move(module_group), aot_options)); TF_ASSIGN_OR_RETURN(std::string compile_result, 
aot_results[0]->SerializeAsString()); *result.mutable_hlo_module() = aot_results[0]->optimized_module()->ToProto(); return compile_result; } Compiler::CompileOptions compile_options; TF_RETURN_IF_ERROR(stream_executor::ValidateGPUMachineManager()); TF_ASSIGN_OR_RETURN( stream_executor::StreamExecutor * stream_executor, stream_executor::GPUMachineManager()->ExecutorForDevice(0)); auto allocator = std::make_unique<stream_executor::StreamExecutorMemoryAllocator>( stream_executor); compile_options.device_allocator = allocator.get(); TF_ASSIGN_OR_RETURN( std::vector<std::unique_ptr<Executable>> executables, gpu_compiler.Compile(std::move(module_group), {{stream_executor}}, compile_options)); *result.mutable_hlo_module() = executables[0]->module().ToProto(); return executables[0]->module().ToString(); #else LOG(ERROR) << "Neither ROCm nor CUDA present; returning empty."; return ""; #endif } absl::StatusOr<std::string> CompileExecutable( std::unique_ptr<HloModule> hlo_module, BackendType backend, std::optional<Compiler::TargetConfig> target_config, CompilationResult& result) { if (backend == BackendType::kCpu) { return AotCompileCpuExecutable(std::move(hlo_module)); } return CompileGpuExecutable(std::move(hlo_module), std::move(target_config), result); } absl::Status WriteResultFile(const absl::string_view result_output_file, TimerStats& stats, CompilationResult& compilation_result) { if (result_output_file.empty()) { return absl::OkStatus(); } absl::MutexLock ml(&stats.stats_mutex); const double secs = std::floor(stats.cumulative_secs); const double nanos = (stats.cumulative_secs - secs) * tsl::EnvTime::kSecondsToNanos; google::protobuf::Duration duration; duration.set_seconds(secs); duration.set_nanos(nanos); *compilation_result.mutable_perf_stats()->mutable_compilation_duration() = duration; *compilation_result.mutable_perf_stats()->mutable_total_duration() = duration; return tsl::WriteBinaryProto( tsl::Env::Default(), std::string(result_output_file), compilation_result); } absl::StatusOr<std::unique_ptr<HloModule>> LoadModule( const absl::string_view module_path) { auto format = std::string(tsl::io::Extension(module_path)); if (format == "hlo" || format == "txt" || format == "pb") { return LoadModuleFromFile( std::string(module_path), format, hlo_module_loader_details::Config(), [&](HloModuleConfig* c) {}, nullptr); } std::string module_string; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), std::string(module_path), &module_string)); mlir::DialectRegistry dialects; dialects.insert<mlir::arith::ArithDialect>(); dialects.insert<mlir::mhlo::MhloDialect>(); dialects.insert<mlir::func::FuncDialect>(); mlir::stablehlo::registerAllDialects(dialects); auto threading = mlir::MLIRContext::Threading::DISABLED; auto ctx = std::make_unique<mlir::MLIRContext>(dialects, threading); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(module_string, ctx.get()); XlaComputation xla_computation; TF_RETURN_IF_ERROR( MlirToXlaComputation(*module, xla_computation, false, false)); HloModuleProto hlo_module_proto = xla_computation.proto(); TF_ASSIGN_OR_RETURN(ProgramShape shape, xla_computation.GetProgramShape()); DebugOptions debug_options = GetDebugOptionsFromFlags(); HloModuleConfig config(shape); config.set_debug_options(debug_options); return HloModule::CreateFromProto(hlo_module_proto, config); } static absl::StatusOr<std::unique_ptr<HloModuleAndMetadata>> ReadModuleFromSymbolRepo(absl::string_view symbol_repo, absl::string_view symbol_reference, BackendType 
backend) { std::unique_ptr<HloModuleAndMetadata> mod; TF_ASSIGN_OR_RETURN( mod, LookupSymbolInRepository(symbol_repo, symbol_reference, backend)); if (mod == nullptr) { return absl::NotFoundError( absl::StrCat("Could not find ", symbol_reference, " in ", symbol_repo)); } return mod; } static absl::StatusOr<bool> LoadAutotuneDataFromModule( HloModuleAndMetadata* mod, BackendType backend) { if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (auto* data = static_cast<gpu::GpuBackendSpecificData*>( mod->backend_specific_data.get()); data != nullptr && data->autotune_results.has_value()) { TF_RETURN_IF_ERROR( gpu::AutotunerUtil::LoadAutotuneResults(*data->autotune_results)); return true; } #endif } return false; } static std::unique_ptr<Compiler::TargetConfig> ReadTargetConfigFromModule( HloModuleAndMetadata* mod, BackendType backend) { if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (auto* data = static_cast<gpu::GpuBackendSpecificData*>( mod->backend_specific_data.get()); data != nullptr) { return std::move(mod->target_config); } #endif } return nullptr; } absl::Status XlaCompileMain(const XlaCompileOptions& options) { std::unique_ptr<HloModule> hlo_module; std::unique_ptr<Compiler::TargetConfig> target_config; if (options.platform != "cpu" && options.platform != "gpu") { return absl::UnimplementedError( absl::StrCat("platform", options.platform, " is not supported")); } const BackendType backend = (options.platform == "gpu" ? BackendType::kGpu : BackendType::kCpu); absl::string_view symbol_repo = options.repo_options.symbol_repo; if (absl::string_view symbol_id = options.repo_options.symbol_id; !symbol_id.empty()) { TF_ASSIGN_OR_RETURN( std::unique_ptr<HloModuleAndMetadata> mod, ReadModuleFromSymbolRepo(symbol_repo, symbol_id, backend)); hlo_module = std::move(mod->hlo_module); target_config = ReadTargetConfigFromModule(mod.get(), backend); } else { TF_ASSIGN_OR_RETURN(hlo_module, LoadModule(options.module_path)); } #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM bool found_autotune = false; #endif if (absl::string_view optimized_symbol_id = options.repo_options.optimized_symbol_id; !optimized_symbol_id.empty()) { TF_ASSIGN_OR_RETURN( std::unique_ptr<HloModuleAndMetadata> optimized_mod, ReadModuleFromSymbolRepo(symbol_repo, optimized_symbol_id, backend)); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TF_ASSIGN_OR_RETURN(found_autotune, LoadAutotuneDataFromModule( optimized_mod.get(), backend)); #endif } xla::TimerStats stats; xla::ScopedLoggingTimer timer("compilation", true, "xla_compile_main.cc", 1, &stats); CompilationResult compilation_result; absl::Cleanup cleanup([&] { timer.StopAndLog(); if (!options.result_output_file.empty()) { TF_QCHECK_OK(WriteResultFile(options.result_output_file, stats, compilation_result)); } }); std::optional<Compiler::TargetConfig> cfg = std::nullopt; if (backend == BackendType::kGpu) { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (absl::string_view gpu_target_config_path = options.gpu_options.gpu_target_config_path; !gpu_target_config_path.empty()) { std::string gpu_target_config_string; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), std::string(gpu_target_config_path), &gpu_target_config_string)); stream_executor::GpuTargetConfigProto gpu_target_config_proto; if (!tsl::protobuf::TextFormat::ParseFromString( gpu_target_config_string, &gpu_target_config_proto)) { return FailedPrecondition("Failed to parse GpuTargetConfigProto"); } target_config = 
std::make_unique<Compiler::TargetConfig>(gpu_target_config_proto); if (absl::string_view autotune_results_path = options.gpu_options.autotune_results_path; !found_autotune && !autotune_results_path.empty()) { TF_RETURN_IF_ERROR(gpu::AutotunerUtil::LoadAutotuneResultsFromFile( autotune_results_path)); } } cfg = (options.gpu_options.use_attached_device) ? std::nullopt : std::make_optional(*std::move(target_config)); #endif } auto result = CompileExecutable(std::move(hlo_module), backend, std::move(cfg), compilation_result); *compilation_result.mutable_status() = tsl::StatusToProto(result.status()); if (!result.ok()) { return result.status(); } TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), options.output_path, *result)); if (options.repo_options.wait_for_uploads) { MaybeWaitForUploads(); } return absl::OkStatus(); } }
#include "xla/tools/xla_compile_lib.h" #include <memory> #include <optional> #include <string> #include <utility> #include "google/protobuf/duration.pb.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/platform_util.h" #include "xla/service/symbol_repository.h" #include "xla/service/xla_compile_result.pb.h" #include "xla/stream_executor/device_description.pb.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "xla/util.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/env.h" #include "tsl/platform/env_time.h" #include "tsl/platform/path.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" #include "tsl/protobuf/status.pb.h" namespace xla { namespace { using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::Not; using ::tsl::testing::IsOk; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; #if XLA_TEST_BACKEND_CPU static constexpr absl::string_view kPlatformName = "Host"; #elif XLA_TEST_BACKEND_GPU static constexpr absl::string_view kPlatformName = #if TENSORFLOW_USE_ROCM "ROCM"; #else "CUDA"; #endif #endif class XlaCompileLibTest : public HloTestBase { protected: XlaCompileLibTest() : HloTestBase(*PlatformUtil::GetPlatform(std::string(kPlatformName)), GetReferencePlatform()) {} void SetUp() override { const std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "data", "add.hlo"); std::string hlo; TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), hlo_path, &hlo)); TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo)); } std::unique_ptr<HloModule> module_; }; TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(CompilesForCpu)) { CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kCpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithDevice)) { CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); EXPECT_TRUE(result.has_hlo_module()) << result.DebugString(); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithoutDevice)) { const std::string target_config_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "xla_aot_compile_test_gpu_target_config.prototxt"); stream_executor::GpuTargetConfigProto target_config; TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), target_config_path, &target_config)); CompilationResult result; EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu, std::nullopt, result), IsOkAndHolds(Not(IsEmpty()))); EXPECT_TRUE(result.has_hlo_module()) << result.DebugString(); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(ErrorsOnUnexpectedPlatform)) { XlaCompileOptions options; options.platform = "tpu"; EXPECT_THAT(XlaCompileMain(options), StatusIs(tsl::error::UNIMPLEMENTED)); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFilePropagatesErrors)) { TimerStats stats; CompilationResult result; EXPECT_THAT(WriteResultFile("/does/not/exist", stats, result), Not(IsOk())); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFileWritesTheFile)) { std::string result_output_file; ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&result_output_file)); TimerStats stats; { absl::MutexLock 
ml(&stats.stats_mutex); stats.cumulative_secs = 5.5; stats.max_secs = 5.5; } CompilationResult result; google::protobuf::Duration duration; duration.set_seconds(5); duration.set_nanos(0.5 * tsl::EnvTime::kSecondsToNanos); *result.mutable_perf_stats()->mutable_compilation_duration() = duration; *result.mutable_perf_stats()->mutable_total_duration() = duration; TF_ASSERT_OK(WriteResultFile(result_output_file, stats, result)); CompilationResult got_result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_output_file, &got_result)); EXPECT_EQ(5, got_result.perf_stats().compilation_duration().seconds()); EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos, got_result.perf_stats().compilation_duration().nanos()); EXPECT_EQ(5, got_result.perf_stats().total_duration().seconds()); EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos, got_result.perf_stats().total_duration().nanos()); } TEST_F(XlaCompileLibTest, LoadModuleErrors) { EXPECT_THAT(LoadModule("/does/not/exist"), Not(IsOk())); } TEST_F(XlaCompileLibTest, LoadModuleLoadsTextFormat) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); EXPECT_THAT(LoadModule(module_file), IsOkAndHolds(Not(IsNull()))); } TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(MainForCpu)) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); const std::string output_path = tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_output"); const std::string result_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_result.pb"); XlaCompileOptions options; options.module_path = module_file; options.output_path = output_path; options.platform = "cpu"; options.result_output_file = result_file; TF_EXPECT_OK(XlaCompileMain(options)); CompilationResult result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result)); EXPECT_TRUE(result.has_status()); EXPECT_EQ(result.status().code(), tensorflow::error::OK); } TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(MainForGpu)) { const std::string module_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt"); TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file, module_->ToString())); const std::string output_path = tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_output"); const std::string result_file = tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_result.pb"); XlaCompileOptions options; options.module_path = module_file; options.output_path = output_path; options.platform = "gpu"; options.result_output_file = result_file; options.gpu_options.use_attached_device = true; TF_EXPECT_OK(XlaCompileMain(options)); CompilationResult result; TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result)); EXPECT_TRUE(result.has_status()); EXPECT_EQ(result.status().code(), tensorflow::error::OK); } } }
absl::Status WriteResultFile(const absl::string_view result_output_file, TimerStats& stats, CompilationResult& compilation_result) { if (result_output_file.empty()) { return absl::OkStatus(); } absl::MutexLock ml(&stats.stats_mutex); const double secs = std::floor(stats.cumulative_secs); const double nanos = (stats.cumulative_secs - secs) * tsl::EnvTime::kSecondsToNanos; google::protobuf::Duration duration; duration.set_seconds(secs); duration.set_nanos(nanos); *compilation_result.mutable_perf_stats()->mutable_compilation_duration() = duration; *compilation_result.mutable_perf_stats()->mutable_total_duration() = duration; return tsl::WriteBinaryProto( tsl::Env::Default(), std::string(result_output_file), compilation_result); }
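WriteResultFile above fills both Duration fields from a single fractional cumulative_secs by splitting whole seconds from the nanosecond remainder. A minimal standalone sketch of that arithmetic, assuming only the standard library (the literal 1e9 stands in for tsl::EnvTime::kSecondsToNanos):

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  // Mirrors how WriteResultFile derives Duration::seconds / Duration::nanos
  // from TimerStats::cumulative_secs (5.5 matches the value used in the test).
  const double cumulative_secs = 5.5;
  const double secs = std::floor(cumulative_secs);
  const double nanos = (cumulative_secs - secs) * 1e9;  // kSecondsToNanos
  std::cout << "seconds=" << static_cast<int64_t>(secs)
            << " nanos=" << static_cast<int64_t>(nanos) << "\n";
  // Prints: seconds=5 nanos=500000000
}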
#include "xla/python/ifrt/device.h" #include <atomic> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/base/optimization.h" #include "absl/hash/hash.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/python/ifrt/device.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace ifrt { char Device::ID = 0; DeviceList::DeviceList(Devices devices) : hash_(kUnsetHash) { if (devices.size() <= kInlineDeviceSize) { state_ = State{std::move(devices)}; } else { state_ = std::make_shared<State>(State{std::move(devices)}); } } DeviceList::DeviceList(const DeviceList& other) : state_(other.state_), hash_(other.hash_.load(std::memory_order_relaxed)) {} DeviceList::DeviceList(DeviceList&& other) : state_(std::move(other.state_)), hash_(other.hash_.load(std::memory_order_relaxed)) {} DeviceList& DeviceList::operator=(const DeviceList& other) { state_ = other.state_; hash_.store(other.hash_.load(std::memory_order_relaxed), std::memory_order_relaxed); return *this; } DeviceList& DeviceList::operator=(DeviceList&& other) { state_ = std::move(other.state_); hash_.store(other.hash_.load(std::memory_order_relaxed), std::memory_order_relaxed); return *this; } absl::StatusOr<DeviceList> DeviceList::FromProto(LookupDeviceFunc lookup_device, const DeviceListProto& proto) { DeviceList::Devices devices; devices.reserve(proto.device_ids_size()); for (int device_id : proto.device_ids()) { TF_ASSIGN_OR_RETURN(Device * device, lookup_device(DeviceId(device_id))); devices.push_back(device); } return DeviceList(std::move(devices)); } DeviceListProto DeviceList::ToProto() const { DeviceListProto proto; proto.mutable_device_ids()->Reserve(devices().size()); for (Device* device : devices()) { proto.mutable_device_ids()->AddAlreadyReserved(device->Id().value()); } return proto; } uint64_t DeviceList::hash() const { uint64_t hash = hash_.load(std::memory_order_relaxed); if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) { hash = absl::HashOf(devices()); if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) { ++hash; } hash_.store(hash, std::memory_order_relaxed); } return hash; } std::string DeviceList::DebugString() const { return absl::StrCat("[", absl::StrJoin(devices(), ",", [](std::string* out, Device* device) { absl::StrAppend(out, device->DebugString()); }), "]"); } std::vector<DeviceId> GetDeviceIds(DeviceList device_list) { std::vector<DeviceId> ids; ids.reserve(device_list.devices().size()); for (const Device* device : device_list.devices()) { ids.push_back(device->Id()); } return ids; } } }
#include "xla/python/ifrt/device.h" #include <algorithm> #include <cstdint> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/synchronization/blocking_counter.h" #include "xla/python/ifrt/device.pb.h" #include "xla/python/ifrt/sharding_test_util.h" #include "tsl/platform/cpu_info.h" #include "tsl/platform/env.h" #include "tsl/platform/statusor.h" #include "tsl/platform/threadpool.h" namespace xla { namespace ifrt { namespace { class DeviceListTest : public test_util::ShardingTest {}; TEST_P(DeviceListTest, ToFromProto) { auto device_list = GetDevices({0, 1}); DeviceListProto proto = device_list.ToProto(); auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> { return client()->LookupDevice(device_id); }; TF_ASSERT_OK_AND_ASSIGN(auto device_list_copy, DeviceList::FromProto(lookup_device_func, proto)); EXPECT_EQ(device_list_copy, device_list); } TEST_P(DeviceListTest, IdenticalHashFromConcurrentCalls) { auto device_list = GetDevices({0, 1}); const int num_threads = 16; absl::BlockingCounter counter(num_threads); tsl::thread::ThreadPool thread_pool( tsl::Env::Default(), tsl::ThreadOptions(), "test_pool", std::min(num_threads, tsl::port::MaxParallelism())); std::vector<uint64_t> hashes(num_threads); for (int i = 0; i < num_threads; ++i) { thread_pool.Schedule([&, i]() { hashes[i] = device_list.hash(); counter.DecrementCount(); }); } counter.Wait(); for (int i = 0; i < num_threads; ++i) { EXPECT_EQ(hashes[i], device_list.hash()); } EXPECT_NE(device_list.hash(), 0); } TEST_P(DeviceListTest, EqualityTest) { auto device_list1 = GetDevices({0, 1}); auto device_list2 = GetDevices({0, 1}); EXPECT_EQ(device_list1, device_list2); auto device_list3 = device_list1; EXPECT_EQ(device_list1, device_list3); auto device_list4 = std::move(device_list2); EXPECT_EQ(device_list1, device_list4); auto device_list5 = GetDevices({0}); EXPECT_NE(device_list1, device_list5); auto device_list6 = GetDevices({1, 0}); EXPECT_NE(device_list1, device_list6); } INSTANTIATE_TEST_SUITE_P(NumDevices, DeviceListTest, testing::Values(test_util::ShardingTestParam{ 2, 2})); } } }
DeviceList::DeviceList(DeviceList&& other) : state_(std::move(other.state_)), hash_(other.hash_.load(std::memory_order_relaxed)) {}
TEST_P(DeviceListTest, EqualityTest) { auto device_list1 = GetDevices({0, 1}); auto device_list2 = GetDevices({0, 1}); EXPECT_EQ(device_list1, device_list2); auto device_list3 = device_list1; EXPECT_EQ(device_list1, device_list3); auto device_list4 = std::move(device_list2); EXPECT_EQ(device_list1, device_list4); auto device_list5 = GetDevices({0}); EXPECT_NE(device_list1, device_list5); auto device_list6 = GetDevices({1, 0}); EXPECT_NE(device_list1, device_list6); }
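DeviceList::hash() above memoizes an absl::HashOf result in a relaxed std::atomic, using kUnsetHash as a sentinel and nudging a computed hash off the sentinel value so it can never be mistaken for "not yet computed". A minimal sketch of the same lazy-hash idiom, assuming only the standard library (std::hash stands in for absl::HashOf, and CachedHash is a hypothetical stand-in for DeviceList):

#include <atomic>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <utility>

class CachedHash {
 public:
  explicit CachedHash(std::string value) : value_(std::move(value)) {}

  uint64_t hash() const {
    uint64_t h = hash_.load(std::memory_order_relaxed);
    if (h == kUnsetHash) {          // first caller (or a racing one) computes
      h = std::hash<std::string>{}(value_);
      if (h == kUnsetHash) ++h;     // keep the sentinel value reserved
      hash_.store(h, std::memory_order_relaxed);
    }
    return h;
  }

 private:
  static constexpr uint64_t kUnsetHash = 0;
  std::string value_;
  mutable std::atomic<uint64_t> hash_{kUnsetHash};
};

int main() {
  CachedHash h("device_list");
  std::cout << (h.hash() == h.hash()) << "\n";  // 1: identical across calls
}

Relaxed ordering suffices here because every racing thread computes the same deterministic value, so the worst case is redundant work rather than a wrong answer; that is the property IdenticalHashFromConcurrentCalls exercises with 16 threads.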
#include "tensorflow/core/data/service/snapshot/path_utils.h" #include <cstdint> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "tsl/platform/path.h" namespace tensorflow { namespace data { namespace { constexpr const char kDoneFileName[] = "DONE"; constexpr const char kErrorFileName[] = "ERROR"; constexpr const char kWorkerFileName[] = "owner_worker"; constexpr const char kSnapshotMetadataFileName[] = "snapshot.metadata"; constexpr const char kDatasetDefFileName[] = "dataset_def.proto"; constexpr const char kDatasetSpecFileName[] = "dataset_spec.pb"; constexpr const char kStreamsDirectoryName[] = "streams"; constexpr const char kSplitsDirectoryName[] = "splits"; constexpr const char kCheckpointsDirectoryName[] = "checkpoints"; constexpr const char kCommittedChunksDirectoryName[] = "chunks"; constexpr const char kUncommittedChunksDirectoryName[] = "uncommitted_chunks"; constexpr int64_t kUnknownNumElements = -1; } std::string StreamsDirectory(absl::string_view snapshot_path) { return tsl::io::JoinPath(snapshot_path, kStreamsDirectoryName); } std::string StreamDirectory(absl::string_view snapshot_path, int64_t stream_index) { return tsl::io::JoinPath(StreamsDirectory(snapshot_path), absl::StrCat("stream_", stream_index)); } std::string SplitsDirectory(absl::string_view snapshot_path, int64_t stream_index) { return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index), kSplitsDirectoryName); } std::string SourceDirectory(absl::string_view snapshot_path, int64_t stream_index, int64_t source_index) { return tsl::io::JoinPath(SplitsDirectory(snapshot_path, stream_index), absl::StrCat("source_", source_index)); } std::string RepetitionDirectory(absl::string_view snapshot_path, int64_t stream_index, int64_t source_index, int64_t repetition_index) { return tsl::io::JoinPath( SourceDirectory(snapshot_path, stream_index, source_index), absl::StrCat("repetition_", repetition_index)); } std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index, int64_t source_index, int64_t repetition_index, int64_t local_index, int64_t global_index) { return tsl::io::JoinPath( RepetitionDirectory(snapshot_path, stream_index, source_index, repetition_index), absl::StrCat("split_", local_index, "_", global_index)); } absl::StatusOr<int64_t> ParseStreamDirectoryName( absl::string_view stream_directory_name) { std::vector<std::string> tokens = absl::StrSplit(stream_directory_name, '_'); int64_t stream_index = 0; if (tokens.size() != 2 || tokens[0] != "stream" || !absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0) { return absl::InvalidArgumentError( absl::StrCat("Invalid stream directory name: ", stream_directory_name, ". Expected stream_<stream_index>.")); } return stream_index; } absl::StatusOr<int64_t> ParseSourceDirectoryName( absl::string_view source_directory_name) { std::vector<std::string> tokens = absl::StrSplit(source_directory_name, '_'); int64_t source_index = 0; if (tokens.size() != 2 || tokens[0] != "source" || !absl::SimpleAtoi(tokens[1], &source_index) || source_index < 0) { return absl::InvalidArgumentError( absl::StrCat("Invalid source directory name: ", source_directory_name, ". 
Expected source_<source_index>.")); } return source_index; } absl::StatusOr<int64_t> ParseRepetitionDirectoryName( absl::string_view repetition_directory_name) { std::vector<std::string> tokens = absl::StrSplit(repetition_directory_name, '_'); int64_t repetition_index = 0; if (tokens.size() != 2 || tokens[0] != "repetition" || !absl::SimpleAtoi(tokens[1], &repetition_index) || repetition_index < 0) { return absl::InvalidArgumentError(absl::StrCat( "Invalid repetition directory name: ", repetition_directory_name, ". Expected repetition_<repetition_index>.")); } return repetition_index; } absl::StatusOr<std::pair<int64_t, int64_t>> ParseSplitFilename( absl::string_view split_filename) { std::vector<std::string> tokens = absl::StrSplit(tsl::io::Basename(split_filename), '_'); int64_t local_split_index = 0, global_split_index = 0; if (tokens.size() != 3 || tokens[0] != "split" || !absl::SimpleAtoi(tokens[1], &local_split_index) || local_split_index < 0 || !absl::SimpleAtoi(tokens[2], &global_split_index) || global_split_index < 0) { return absl::InvalidArgumentError(absl::StrCat( "Invalid split file name: ", split_filename, ". Expected split_<local_split_index>_<global_split_index>.")); } if (local_split_index > global_split_index) { return absl::InvalidArgumentError(absl::StrCat( "Invalid split file name: ", split_filename, ". The local split index ", local_split_index, " exceeds the global split index ", global_split_index, ".")); } return std::make_pair(local_split_index, global_split_index); } absl::StatusOr<std::pair<int64_t, int64_t>> ParseCheckpointFilename( absl::string_view checkpoint_filename) { std::vector<std::string> tokens = absl::StrSplit(checkpoint_filename, '_'); int64_t checkpoint_index = 0, checkpoint_num_elements = 0; if (tokens.size() != 3 || tokens[0] != "checkpoint" || !absl::SimpleAtoi(tokens[1], &checkpoint_index) || checkpoint_index < 0 || !absl::SimpleAtoi(tokens[2], &checkpoint_num_elements) || (checkpoint_num_elements < 0 && checkpoint_num_elements != kUnknownNumElements)) { return absl::InvalidArgumentError(absl::StrCat( "Invalid checkpoint file name: ", checkpoint_filename, ". Expected checkpoint_<checkpoint_index>_<checkpoint_num_elements>.")); } return std::make_pair(checkpoint_index, checkpoint_num_elements); } absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> ParseChunkFilename( absl::string_view chunk_filename) { std::vector<std::string> tokens = absl::StrSplit(chunk_filename, '_'); int64_t stream_index = 0, stream_chunk_index = 0, chunk_num_elements = 0; if (tokens.size() != 4 || tokens[0] != "chunk" || !absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0 || !absl::SimpleAtoi(tokens[2], &stream_chunk_index) || stream_chunk_index < 0 || !absl::SimpleAtoi(tokens[3], &chunk_num_elements) || (chunk_num_elements < 0 && chunk_num_elements != kUnknownNumElements)) { return absl::InvalidArgumentError(absl::StrCat( "Invalid chunk file name: ", chunk_filename, ". 
Expected " "chunk_<stream_index>_<stream_chunk_index>_<chunk_num_elements>.")); } return std::make_tuple(stream_index, stream_chunk_index, chunk_num_elements); } std::string SnapshotMetadataFilePath(absl::string_view snapshot_path_) { return tsl::io::JoinPath(snapshot_path_, kSnapshotMetadataFileName); } std::string DatasetDefFilePath(absl::string_view snapshot_path_) { return tsl::io::JoinPath(snapshot_path_, kDatasetDefFileName); } std::string DatasetSpecFilePath(absl::string_view snapshot_path_) { return tsl::io::JoinPath(snapshot_path_, kDatasetSpecFileName); } std::string StreamDoneFilePath(absl::string_view snapshot_path, int64_t stream_index) { return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index), kDoneFileName); } std::string StreamWorkerFilePath(absl::string_view snapshot_path, int64_t stream_index) { return StreamWorkerFilePath(StreamDirectory(snapshot_path, stream_index)); } std::string StreamWorkerFilePath(absl::string_view stream_path) { return tsl::io::JoinPath(stream_path, kWorkerFileName); } std::string SnapshotDoneFilePath(absl::string_view snapshot_path) { return tsl::io::JoinPath(snapshot_path, kDoneFileName); } std::string SnapshotErrorFilePath(absl::string_view snapshot_path) { return tsl::io::JoinPath(snapshot_path, kErrorFileName); } std::string CheckpointsDirectory(absl::string_view snapshot_path, int64_t stream_index) { return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index), kCheckpointsDirectoryName); } std::string CommittedChunksDirectory(absl::string_view snapshot_path) { return tsl::io::JoinPath(snapshot_path, kCommittedChunksDirectoryName); } std::string UncommittedChunksDirectory(absl::string_view snapshot_path, int64_t stream_index) { return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index), kUncommittedChunksDirectoryName); } } }
#include "tensorflow/core/data/service/snapshot/path_utils.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { using ::testing::FieldsAre; using ::testing::HasSubstr; using ::testing::MatchesRegex; using ::testing::Pair; using tsl::testing::IsOkAndHolds; using tsl::testing::StatusIs; TEST(PathUtilsTest, StreamsDirectory) { EXPECT_THAT(StreamsDirectory("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.streams")); } TEST(PathUtilsTest, StreamDirectory) { EXPECT_THAT(StreamDirectory("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0")); } TEST(PathUtilsTest, SplitsDirectory) { EXPECT_THAT(SplitsDirectory("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0.splits")); } TEST(PathUtilsTest, SourceDirectory) { EXPECT_THAT( SourceDirectory("/path/to/snapshot", 0, 1), MatchesRegex("/path/to/snapshot.streams.stream_0.splits.source_1")); } TEST(PathUtilsTest, RepetitionDirectory) { EXPECT_THAT( RepetitionDirectory("/path/to/snapshot", 0, 1, 2), MatchesRegex( "/path/to/snapshot.streams.stream_0.splits.source_1.repetition_2")); } TEST(PathUtilsTest, SplitPath) { EXPECT_THAT( SplitPath("/path/to/snapshot", 0, 1, 2, 3, 4), MatchesRegex( "/path/to/" "snapshot.streams.stream_0.splits.source_1.repetition_2.split_3_4")); } TEST(PathUtilsTest, ParseStreamDirectoryName) { EXPECT_THAT(ParseStreamDirectoryName("stream_1"), IsOkAndHolds(1)); } TEST(PathUtilsTest, ParseSourceDirectoryName) { EXPECT_THAT(ParseSourceDirectoryName("source_1"), IsOkAndHolds(1)); EXPECT_THAT(ParseSourceDirectoryName(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected source_<source_index>"))); EXPECT_THAT(ParseSourceDirectoryName("source_-1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected source_<source_index>"))); EXPECT_THAT(ParseSourceDirectoryName("chunk_1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected source_<source_index>"))); } TEST(PathUtilsTest, ParseRepetitionDirectoryName) { EXPECT_THAT(ParseRepetitionDirectoryName("repetition_1"), IsOkAndHolds(1)); EXPECT_THAT(ParseRepetitionDirectoryName(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected repetition_<repetition_index>"))); EXPECT_THAT(ParseRepetitionDirectoryName("repetition_-1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected repetition_<repetition_index>"))); EXPECT_THAT(ParseRepetitionDirectoryName("chunk_1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected repetition_<repetition_index>"))); } TEST(PathUtilsTest, InvalidStreamDirectoryName) { EXPECT_THAT(ParseStreamDirectoryName(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected stream_<stream_index>"))); EXPECT_THAT(ParseStreamDirectoryName("stream_-1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected stream_<stream_index>"))); EXPECT_THAT(ParseStreamDirectoryName("chunk_1"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected stream_<stream_index>"))); } TEST(PathUtilsTest, ParseSplitFilename) { EXPECT_THAT(ParseSplitFilename("split_0_1"), IsOkAndHolds(Pair(0, 1))); } TEST(PathUtilsTest, InvalidSplitFilename) { EXPECT_THAT( ParseSplitFilename(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected split_<local_split_index>_<global_split_index>"))); EXPECT_THAT( ParseSplitFilename("split_123"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected split_<local_split_index>_<global_split_index>"))); EXPECT_THAT( ParseSplitFilename("split_-1_(-1)"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( 
"Expected split_<local_split_index>_<global_split_index>"))); EXPECT_THAT( ParseSplitFilename("chunk_1_2"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected split_<local_split_index>_<global_split_index>"))); EXPECT_THAT( ParseSplitFilename("split_5_0"), StatusIs( error::INVALID_ARGUMENT, HasSubstr( "The local split index 5 exceeds the global split index 0"))); } TEST(PathUtilsTest, ParseCheckpointFilename) { EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_1"), IsOkAndHolds(Pair(0, 1))); EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_-1"), IsOkAndHolds(Pair(0, -1))); } TEST(PathUtilsTest, InvalidCheckpointFilename) { EXPECT_THAT( ParseCheckpointFilename(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected " "checkpoint_<checkpoint_index>_<checkpoint_num_elements>"))); EXPECT_THAT( ParseCheckpointFilename("checkpoint_123"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected " "checkpoint_<checkpoint_index>_<checkpoint_num_elements>"))); EXPECT_THAT( ParseCheckpointFilename("checkpoint_-1_(-1)"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected " "checkpoint_<checkpoint_index>_<checkpoint_num_elements>"))); EXPECT_THAT( ParseCheckpointFilename("chunk_1_2"), StatusIs(error::INVALID_ARGUMENT, HasSubstr( "Expected " "checkpoint_<checkpoint_index>_<checkpoint_num_elements>"))); } TEST(PathUtilsTest, ParseChunkFilename) { EXPECT_THAT(ParseChunkFilename("chunk_0_1_2"), IsOkAndHolds(FieldsAre(0, 1, 2))); EXPECT_THAT(ParseChunkFilename("chunk_0_1_-1"), IsOkAndHolds(FieldsAre(0, 1, -1))); } TEST(PathUtilsTest, InvalidChunkFilename) { EXPECT_THAT(ParseChunkFilename(""), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected " "chunk_<stream_index>_<stream_chunk_index>_<" "chunk_num_elements>"))); EXPECT_THAT(ParseChunkFilename("chunk_123_0"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected " "chunk_<stream_index>_<stream_chunk_index>_<" "chunk_num_elements>"))); EXPECT_THAT(ParseChunkFilename("chunk_-1_(-1)_0"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected " "chunk_<stream_index>_<stream_chunk_index>_<" "chunk_num_elements>"))); EXPECT_THAT(ParseChunkFilename("split_1_2_3"), StatusIs(error::INVALID_ARGUMENT, HasSubstr("Expected " "chunk_<stream_index>_<stream_chunk_index>_<" "chunk_num_elements>"))); } TEST(PathUtilsTest, StreamDoneFilePath) { EXPECT_THAT(StreamDoneFilePath("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0.DONE")); } TEST(PathUtilsTest, StreamWorkerFilePath) { EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker")); EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot/streams/stream_0"), MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker")); } TEST(PathUtilsTest, SnapshotDoneFilePath) { EXPECT_THAT(SnapshotDoneFilePath("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.DONE")); } TEST(PathUtilsTest, SnapshotErrorFilePath) { EXPECT_THAT(SnapshotErrorFilePath("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.ERROR")); } TEST(PathUtilsTest, SnapshotMetadataFilePath) { EXPECT_THAT(SnapshotMetadataFilePath("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.snapshot.metadata")); } TEST(PathUtilsTest, DatasetDefFilePath) { EXPECT_THAT(DatasetDefFilePath("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.dataset_def.proto")); } TEST(PathUtilsTest, DatasetSpefFilePath) { EXPECT_THAT(DatasetSpecFilePath("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.dataset_spec.pb")); } TEST(PathUtilsTest, CheckpointsDirectory) { 
EXPECT_THAT(CheckpointsDirectory("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0.checkpoints")); } TEST(PathUtilsTest, CommittedChunksDirectory) { EXPECT_THAT(CommittedChunksDirectory("/path/to/snapshot"), MatchesRegex("/path/to/snapshot.chunks")); } TEST(PathUtilsTest, UncommittedChunksDirectory) { EXPECT_THAT( UncommittedChunksDirectory("/path/to/snapshot", 0), MatchesRegex("/path/to/snapshot.streams.stream_0.uncommitted_chunks")); } } } }
std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index, int64_t source_index, int64_t repetition_index, int64_t local_index, int64_t global_index) { return tsl::io::JoinPath( RepetitionDirectory(snapshot_path, stream_index, source_index, repetition_index), absl::StrCat("split_", local_index, "_", global_index)); }
TEST(PathUtilsTest, SplitPath) { EXPECT_THAT( SplitPath("/path/to/snapshot", 0, 1, 2, 3, 4), MatchesRegex( "/path/to/" "snapshot.streams.stream_0.splits.source_1.repetition_2.split_3_4")); }
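SplitPath and the directory helpers above compose the fixed snapshot layout streams/stream_<i>/splits/source_<j>/repetition_<k>/split_<local>_<global>. A small sketch of the same composition with plain string concatenation, assuming POSIX separators (SplitPathSketch is a hypothetical stand-in; the real code routes every level through tsl::io::JoinPath):

#include <cstdint>
#include <iostream>
#include <string>

std::string SplitPathSketch(const std::string& snapshot, int64_t stream,
                            int64_t source, int64_t repetition,
                            int64_t local, int64_t global) {
  // One level per helper: StreamsDirectory -> StreamDirectory ->
  // SplitsDirectory -> SourceDirectory -> RepetitionDirectory -> SplitPath.
  return snapshot + "/streams/stream_" + std::to_string(stream) +
         "/splits/source_" + std::to_string(source) +
         "/repetition_" + std::to_string(repetition) +
         "/split_" + std::to_string(local) + "_" + std::to_string(global);
}

int main() {
  // Matches the shape asserted by PathUtilsTest.SplitPath.
  std::cout << SplitPathSketch("/path/to/snapshot", 0, 1, 2, 3, 4) << "\n";
}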
#include <algorithm> #include <vector> #include "tensorflow/cc/framework/scope_internal.h" #include "tensorflow/core/common_runtime/shape_refiner.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/strings/str_util.h" namespace tensorflow { Scope::Scope(Impl* impl) : impl_(impl) {} Scope::Scope(const Scope& other) : impl_(new Impl(*other.impl())) {} Scope::~Scope() {} Scope& Scope::operator=(const Scope& other) { impl_.reset(new Impl(*other.impl_)); return *this; } namespace { const char kScopeSeparator[] = "/"; const char kSuffixSeparator[] = "_"; } Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map, ShapeRefiner* refiner, bool disable_shape_inference) : graph_(graph), status_(status), name_map_(name_map), refiner_(refiner), scope_used_(nullptr), colocation_constraints_(), disable_shape_inference_(disable_shape_inference) {} Scope::Impl::Impl(const std::shared_ptr<Graph>& graph, const std::shared_ptr<Status>& status, const std::shared_ptr<NameMap>& name_map, const std::shared_ptr<ShapeRefiner>& refiner) : graph_(graph), status_(status), name_map_(name_map), refiner_(refiner), scope_used_(nullptr), colocation_constraints_(), disable_shape_inference_(refiner_ == nullptr) {} Scope Scope::NewRootScope() { Graph* graph = new Graph(OpRegistry::Global()); ShapeRefiner* refiner = new ShapeRefiner(graph->versions(), graph->op_registry()); return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner, false)); } Scope Scope::DisabledShapeInferenceScope() { Graph* graph = new Graph(OpRegistry::Global()); ShapeRefiner* refiner = new ShapeRefiner(graph->versions(), graph->op_registry()); return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner, true)); } Scope::Impl::Impl(const Scope& other, Tags::ScopeName, const string& name, bool copy_names) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(copy_names ? other.impl()->name_map_ : std::shared_ptr<NameMap>(new NameMap)), refiner_(other.impl()->refiner_), scope_used_(nullptr), control_deps_(other.impl()->control_deps_), name_(name), op_name_(""), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::OpName, const string& name, const string& op_name) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(name), op_name_(op_name), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::ControlDeps, std::vector<Operation> control_deps, bool clear_control_deps) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_( clear_control_deps ? 
std::vector<Operation>() : (control_deps.insert(control_deps.begin(), other.impl()->control_deps_.begin(), other.impl()->control_deps_.end()), control_deps)), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::Device, const string& device) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(device), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::SingleUseScope, const string& op_name) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(new bool(false)), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(op_name), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::ExitOnError) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(true), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::KernelLabel, const string& kernel_label) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(kernel_label), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::Colocate, const Operation& colocate_with_op, bool clear_colocations) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), 
scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_( clear_colocations ? std::unordered_set<string>() : other.impl()->GetColocationConstraints(colocate_with_op)), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::AssignedDevice, const string& assigned_device) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(assigned_device), xla_cluster_(other.impl()->xla_cluster_), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} Scope::Impl::Impl(const Scope& other, Tags::XlaCluster, const string& xla_cluster) : graph_(other.impl()->graph_), status_(other.impl()->status_), name_map_(other.impl()->name_map_), refiner_(other.impl()->refiner_), scope_used_(other.impl()->scope_used_), control_deps_(other.impl()->control_deps_), name_(other.impl()->name_), op_name_(other.impl()->op_name_), exit_on_error_(other.impl()->exit_on_error_), kernel_label_(other.impl()->kernel_label_), device_(other.impl()->device_), assigned_device_(other.impl()->assigned_device_), xla_cluster_(xla_cluster), colocation_constraints_(other.impl()->colocation_constraints_), disable_shape_inference_(other.impl()->disable_shape_inference_) {} std::unordered_set<string> Scope::Impl::GetColocationConstraints( const Operation& colocate_with_op) const { std::unordered_set<string> current_constraints(colocation_constraints_); const AttrSlice attrs = colocate_with_op.node()->attrs(); std::vector<string> node_constraints; if (TryGetNodeAttr(attrs, kColocationAttrName, &node_constraints)) { for (const string& entry : node_constraints) { StringPiece s(entry); if (absl::ConsumePrefix(&s, kColocationGroupPrefix)) { current_constraints.emplace(s); } } } else { current_constraints.insert(colocate_with_op.node()->name()); } return current_constraints; } bool Scope::ok() const { return impl()->status_->ok(); } Graph* Scope::graph() const { return impl()->graph_.get(); } std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const { return impl()->graph_; } Status Scope::status() const { return *impl()->status_; } const std::vector<Operation>& Scope::control_deps() const { return impl()->control_deps_; } void Scope::UpdateStatus(const Status& s) const { impl()->status_->Update(s); if (impl()->exit_on_error_ && !ok()) { LOG(FATAL) << *impl()->status_; } } Status Scope::ToGraphDef(GraphDef* gdef, bool include_debug_info) const { if (!ok()) { return *impl()->status_; } graph()->ToGraphDef(gdef, true, include_debug_info); return absl::OkStatus(); } Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const { if (ok()) { GraphDef graph_def; graph()->ToGraphDef(&graph_def); UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g)); } return *impl()->status_; } void Scope::UpdateBuilder(NodeBuilder* builder) const 
{ std::vector<Node*> control_inputs; for (const auto& op : impl()->control_deps_) { control_inputs.push_back(op.node()); } builder->ControlInputs(control_inputs); if (!impl()->kernel_label_.empty()) { builder->Attr("_kernel", impl()->kernel_label_); } if (!impl()->colocation_constraints_.empty()) { std::vector<string> constraints(impl()->colocation_constraints_.begin(), impl()->colocation_constraints_.end()); std::sort(constraints.begin(), constraints.end()); std::transform(constraints.begin(), constraints.end(), constraints.begin(), [](const string& s) { return strings::StrCat(kColocationGroupPrefix, s); }); builder->Attr(kColocationAttrName, constraints); } if (!impl()->device_.empty()) { builder->Device(impl()->device_); } if (!impl()->assigned_device_.empty()) { builder->AssignedDevice(impl()->assigned_device_); } if (!impl()->xla_cluster_.empty()) { builder->XlaCluster(impl()->xla_cluster_); } } string Scope::Impl::GetUniqueName(const string& prefix, bool check_single_use) const { if (check_single_use && single_use_scope()) { if (*scope_used_) { *status_ = errors::AlreadyExists(prefix, " already exists in the current scope"); return ""; } *scope_used_ = true; return prefix; } auto entry = name_map_->find(prefix); if (entry == name_map_->end()) { name_map_->insert({prefix, 0}); return prefix; } string unique_name; do { unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second); } while (name_map_->find(unique_name) != name_map_->end()); name_map_->insert({unique_name, 0}); return unique_name; } string Scope::Impl::GetNameForOp(const string& default_name) const { const string unique_name = GetUniqueName(default_name, true ); const string sep = name_.empty() || unique_name.empty() ? "" : kScopeSeparator; return strings::StrCat(name_, sep, unique_name); } string Scope::GetUniqueNameForOp(const string& default_name) const { if (impl()->single_use_scope()) { if (impl()->op_name_.empty() || *impl()->scope_used_) { *impl()->status_ = errors::InvalidArgument("Cannot get a unique name in this scope"); return ""; } *impl()->scope_used_ = true; return impl()->op_name_; } return impl()->op_name_.empty() ? impl()->GetNameForOp(default_name) : impl()->GetNameForOp(impl()->op_name_); } Scope Scope::NewSubScope(const string& child_scope_name) const { if (child_scope_name.empty()) { return Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->name_, true )); } const string unique_name = impl()->GetUniqueName(child_scope_name, false ); const string sep = impl()->name_.empty() || unique_name.empty() ? 
"" : kScopeSeparator; return Scope(new Impl(*this, Impl::Tags::ScopeName(), strings::StrCat(impl()->name_, sep, unique_name), false )); } Scope Scope::WithOpNameImpl(const string& op_name) const { if (impl()->single_use_scope()) { UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name, " on this scope")); return *this; } return Scope(new Impl(*this, Impl::Tags::OpName(), impl()->name_, op_name)); } Scope Scope::WithControlDependencies( const absl::Span<const Operation> control_deps) const { return Scope( new Impl(*this, Impl::Tags::ControlDeps(), std::vector<Operation>(control_deps.begin(), control_deps.end()), false)); } Scope Scope::WithControlDependencies(const Output& control_dep) const { return Scope(new Impl(*this, Impl::Tags::ControlDeps(), std::vector<Operation>(1, control_dep.op()), false)); } Scope Scope::WithNoControlDependencies() const { return Scope(new Impl(*this, Impl::Tags::ControlDeps(), std::vector<Operation>(), true)); } Scope Scope::WithDevice(const string& device) const { return Scope(new Impl(*this, Impl::Tags::Device(), device)); } Scope Scope::WithAssignedDevice(const string& assigned_device) const { return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device)); } Scope Scope::WithXlaCluster(const string& xla_cluster) const { return Scope(new Impl(*this, Impl::Tags::XlaCluster(), xla_cluster)); } Scope Scope::ColocateWith(const Operation& op) const { return Scope(new Impl(*this, Impl::Tags::Colocate(), op, false)); } Scope Scope::ClearColocation() const { return Scope(new Impl(*this, Impl::Tags::Colocate(), Operation(), true)); } Scope Scope::ExitOnError() const { return Scope(new Impl(*this, Impl::Tags::ExitOnError())); } Scope Scope::WithKernelLabel(const string& kernel_label) const { return Scope(new Impl(*this, Impl::Tags::KernelLabel(), kernel_label)); } CompositeOpScopes Scope::GetCompositeOpScopes( const string& composite_op_name) const { if (impl()->op_name_.empty() && composite_op_name.empty()) { UpdateStatus(errors::InvalidArgument( "Cannot create composite op scopes with empty name")); return {*this, *this}; } if (!impl()->single_use_scope()) { Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name : impl()->op_name_); const string child_op_sep = impl()->name_.empty() ? 
"" : kSuffixSeparator; const string child_name = strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_); return {child, Scope(new Impl(child, Impl::Tags::SingleUseScope(), child_name))}; } else { return {Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->op_name_, true )), *this}; } } Status Scope::DoShapeInference(Node* node) const { if (impl_->disable_shape_inference_) return absl::OkStatus(); return impl_->refiner_->AddNode(node); } class InternalScope { public: static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) { Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap; for (const Node* node : graph->nodes()) { const string& name = node->name(); (*name_map)[name] = 0; size_t idx = -1; while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) { (*name_map)[name.substr(0, idx)] = 0; } } return Scope(new Scope::Impl( std::shared_ptr<Graph>(graph, [](Graph*) {}), std::shared_ptr<Status>(status, [](Status*) {}), std::shared_ptr<Scope::Impl::NameMap>(name_map), std::shared_ptr<ShapeRefiner>(refiner, [](ShapeRefiner*) {}))); } }; Scope NewInternalScope(Graph* graph, Status* status, ShapeRefiner* refiner) { return InternalScope::NewScope(graph, status, refiner); } Status CreateOutputWithScope(string op_name, absl::Span<const ::tensorflow::Input> inputs, const Scope& scope, Output* output) { TF_RETURN_IF_ERROR(scope.status()); const auto unique_name = scope.GetUniqueNameForOp(op_name); auto builder = ::tensorflow::NodeBuilder(unique_name, op_name); for (const auto& input : inputs) { TF_RETURN_IF_ERROR(scope.status()); builder = builder.Input(input.node()); } ::tensorflow::Node* ret; scope.UpdateBuilder(&builder); TF_RETURN_IF_ERROR(scope.status()); scope.UpdateStatus(builder.Finalize(scope.graph(), &ret)); TF_RETURN_IF_ERROR(scope.status()); *output = Output(ret, 0); return absl::OkStatus(); } }
#include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(ScopeTest, BasicNames) { Scope root = Scope::NewRootScope(); EXPECT_EQ(root.GetUniqueNameForOp("add"), "add"); EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_1"); EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_2"); EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul"); } TEST(ScopeTest, OpAndScopeNameCollision) { Scope root = Scope::NewRootScope(); EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo"); EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1"); EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1"); EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2"); EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3"); EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1"); } TEST(ScopeTest, HierarchicalNames) { Scope root = Scope::NewRootScope(); Scope child = root.NewSubScope("child"); EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add"); EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add_1"); EXPECT_EQ(child.GetUniqueNameForOp("mul"), "child/mul"); Scope child_1 = root.NewSubScope("child"); EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add"); EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add_1"); EXPECT_EQ(child_1.GetUniqueNameForOp("mul"), "child_1/mul"); Scope c_c = root.NewSubScope("c").NewSubScope("c"); EXPECT_EQ(c_c.GetUniqueNameForOp("add"), "c/c/add"); Scope c_1 = root.NewSubScope("c"); Scope c_1_c = c_1.NewSubScope("c"); EXPECT_EQ(c_1_c.GetUniqueNameForOp("add"), "c_1/c/add"); Scope c_1_c_1 = c_1.NewSubScope("c"); EXPECT_EQ(c_1_c_1.GetUniqueNameForOp("add"), "c_1/c_1/add"); EXPECT_EQ(root.NewSubScope("").NewSubScope("").GetUniqueNameForOp("d"), "d"); EXPECT_EQ(root.NewSubScope("").GetUniqueNameForOp("d"), "d_1"); EXPECT_EQ(root.GetUniqueNameForOp("d"), "d_2"); } TEST(ScopeTest, ScopeAndOpNames) { Scope root = Scope::NewRootScope(); Scope child = root.NewSubScope("child"); EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add"); EXPECT_EQ(root.GetUniqueNameForOp("child"), "child_1"); EXPECT_EQ(root.NewSubScope("child").GetUniqueNameForOp("p"), "child_2/p"); } namespace { string LastOp(const Scope& scope) { return scope.GetUniqueNameForOp("Last"); } std::vector<string> AnotherCompositeOp(const Scope& scope) { auto cop_scopes = scope.GetCompositeOpScopes("another_cop"); const string c1 = cop_scopes.child.GetUniqueNameForOp("c1"); const string c2 = cop_scopes.child.GetUniqueNameForOp("mul"); return {c1, c2, LastOp(cop_scopes.last)}; } std::vector<string> LinearOp(const Scope& scope) { auto cop_scopes = scope.GetCompositeOpScopes("linear"); Scope linear = cop_scopes.child; const string mul_op_name = linear.GetUniqueNameForOp("mul"); const string bias_add_op_name = linear.GetUniqueNameForOp("bias_add"); auto cop_names = AnotherCompositeOp(cop_scopes.last); return {mul_op_name, bias_add_op_name, cop_names[0], cop_names[1], cop_names[2]}; } } TEST(ScopeTest, CompositeOp) { Scope root = Scope::NewRootScope(); const auto names1 = LinearOp(root); EXPECT_EQ(names1[0], "linear/mul"); EXPECT_EQ(names1[1], "linear/bias_add"); EXPECT_EQ(names1[2], "linear/c1"); EXPECT_EQ(names1[3], "linear/mul_1"); EXPECT_EQ(names1[4], "linear"); EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1"); const auto names2 = LinearOp(root); EXPECT_EQ(names2[0], "linear_2/mul"); EXPECT_EQ(names2[1], "linear_2/bias_add"); EXPECT_EQ(names2[2], "linear_2/c1"); EXPECT_EQ(names2[3], "linear_2/mul_1"); EXPECT_EQ(names2[4], "linear_2"); const auto names3 = 
LinearOp(root.WithOpName("c")); EXPECT_EQ(names3[0], "c/mul"); EXPECT_EQ(names3[1], "c/bias_add"); EXPECT_EQ(names3[2], "c/c1"); EXPECT_EQ(names3[3], "c/mul_1"); EXPECT_EQ(names3[4], "c"); } TEST(ScopeTest, SingleUseScope) { Scope root = Scope::NewRootScope(); auto cop_scopes = root.GetCompositeOpScopes("cop"); EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop"); cop_scopes.last.GetUniqueNameForOp("foo"); EXPECT_FALSE(cop_scopes.last.ok()); } TEST(ScopeTest, ControlDeps) { Scope root = Scope::NewRootScope(); auto c1 = Operation(); auto c2 = Operation(); Scope c = root.WithControlDependencies({c1, c2}); EXPECT_EQ(c.control_deps().size(), 2); Scope c_c = c.WithControlDependencies({Operation()}); EXPECT_EQ(c_c.control_deps().size(), 3); } TEST(ScopeTest, CreateOutput) { Scope root = Scope::NewRootScope(); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT); Output add; ASSERT_TRUE( CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok()); EXPECT_EQ(add.node()->name(), "add"); EXPECT_EQ(add.node()->type_string(), "Add"); } }
Graph* Scope::graph() const { return impl()->graph_.get(); } std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const { return impl()->graph_; }
TEST(ScopeTest, CreateOutput) { Scope root = Scope::NewRootScope(); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT); Output add; ASSERT_TRUE( CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok()); EXPECT_EQ(add.node()->name(), "add"); EXPECT_EQ(add.node()->type_string(), "Add"); }
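Scope::Impl::GetUniqueName above produces the add, add_1, add_2 numbering that BasicNames and OpAndScopeNameCollision assert: a map records every name handed out together with the last numeric suffix tried for it, and a collision with an explicitly claimed name (e.g. foo_2) makes the probe loop skip ahead. A minimal sketch of that scheme, assuming only the standard library (NameUniquer is a hypothetical stand-in for the name_map_ logic; single-use scopes and scope-name prefixes are omitted):

#include <iostream>
#include <string>
#include <unordered_map>

class NameUniquer {
 public:
  std::string GetUniqueName(const std::string& prefix) {
    auto [it, inserted] = names_.try_emplace(prefix, 0);
    if (inserted) return prefix;          // first use: the bare prefix wins
    std::string unique;
    do {                                  // probe prefix_1, prefix_2, ...
      unique = prefix + "_" + std::to_string(++it->second);
    } while (names_.count(unique) > 0);   // skip names already claimed
    names_.emplace(unique, 0);
    return unique;
  }

 private:
  std::unordered_map<std::string, int> names_;  // name -> last suffix tried
};

int main() {
  NameUniquer u;
  std::cout << u.GetUniqueName("add") << "\n";    // add
  std::cout << u.GetUniqueName("add") << "\n";    // add_1
  std::cout << u.GetUniqueName("add_1") << "\n";  // add_1_1
  std::cout << u.GetUniqueName("add") << "\n";    // add_2
}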
#ifndef XLA_PJRT_C_PJRT_C_API_H_ #define XLA_PJRT_C_PJRT_C_API_H_ #include <stdbool.h> #include <stddef.h> #include <stdint.h> #define PJRT_STRUCT_SIZE(struct_type, last_field) \ offsetof(struct_type, last_field) + sizeof(((struct_type*)0)->last_field) #define PJRT_DEFINE_STRUCT_TRAITS(sname, last_field) \ typedef struct sname sname; \ enum { sname##_STRUCT_SIZE = PJRT_STRUCT_SIZE(sname, last_field) } #ifdef __cplusplus extern "C" { #endif typedef enum { PJRT_Extension_Type_Gpu_Custom_Call = 0, PJRT_Extension_Type_Profiler, PJRT_Extension_Type_Custom_Partitioner, PJRT_Extension_Type_Stream, PJRT_Extension_Type_Layouts, PJRT_Extension_Type_FFI, } PJRT_Extension_Type; typedef struct PJRT_Extension_Base { size_t struct_size; PJRT_Extension_Type type; struct PJRT_Extension_Base* next; } PJRT_Extension_Base; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Extension_Base, next); #define PJRT_API_MAJOR 0 #define PJRT_API_MINOR 54 struct PJRT_Api_Version { size_t struct_size; PJRT_Extension_Base* extension_start; int major_version; int minor_version; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Api_Version, minor_version); typedef struct PJRT_Error PJRT_Error; struct PJRT_Error_Destroy_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Error* error; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Destroy_Args, error); typedef void PJRT_Error_Destroy(PJRT_Error_Destroy_Args* args); struct PJRT_Error_Message_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const PJRT_Error* error; const char* message; size_t message_size; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Message_Args, message_size); typedef void PJRT_Error_Message(PJRT_Error_Message_Args* args); typedef enum { PJRT_Error_Code_CANCELLED = 1, PJRT_Error_Code_UNKNOWN = 2, PJRT_Error_Code_INVALID_ARGUMENT = 3, PJRT_Error_Code_DEADLINE_EXCEEDED = 4, PJRT_Error_Code_NOT_FOUND = 5, PJRT_Error_Code_ALREADY_EXISTS = 6, PJRT_Error_Code_PERMISSION_DENIED = 7, PJRT_Error_Code_RESOURCE_EXHAUSTED = 8, PJRT_Error_Code_FAILED_PRECONDITION = 9, PJRT_Error_Code_ABORTED = 10, PJRT_Error_Code_OUT_OF_RANGE = 11, PJRT_Error_Code_UNIMPLEMENTED = 12, PJRT_Error_Code_INTERNAL = 13, PJRT_Error_Code_UNAVAILABLE = 14, PJRT_Error_Code_DATA_LOSS = 15, PJRT_Error_Code_UNAUTHENTICATED = 16 } PJRT_Error_Code; struct PJRT_Error_GetCode_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const PJRT_Error* error; PJRT_Error_Code code; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_GetCode_Args, code); typedef PJRT_Error* PJRT_Error_GetCode(PJRT_Error_GetCode_Args* args); typedef PJRT_Error* (*PJRT_CallbackError)(PJRT_Error_Code code, const char* message, size_t message_size); typedef enum { PJRT_NamedValue_kString = 0, PJRT_NamedValue_kInt64, PJRT_NamedValue_kInt64List, PJRT_NamedValue_kFloat, PJRT_NamedValue_kBool, } PJRT_NamedValue_Type; struct PJRT_NamedValue { size_t struct_size; PJRT_Extension_Base* extension_start; const char* name; size_t name_size; PJRT_NamedValue_Type type; union { const char* string_value; int64_t int64_value; const int64_t* int64_array_value; float float_value; bool bool_value; }; size_t value_size; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_NamedValue, value_size); struct PJRT_Plugin_Initialize_Args { size_t struct_size; PJRT_Extension_Base* extension_start; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Initialize_Args, extension_start); typedef PJRT_Error* PJRT_Plugin_Initialize(PJRT_Plugin_Initialize_Args* args); struct PJRT_Plugin_Attributes_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const PJRT_NamedValue* attributes; size_t 
num_attributes; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Attributes_Args, attributes); typedef PJRT_Error* PJRT_Plugin_Attributes(PJRT_Plugin_Attributes_Args* args); typedef struct PJRT_Event PJRT_Event; struct PJRT_Event_Destroy_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Event* event; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Destroy_Args, event); typedef PJRT_Error* PJRT_Event_Destroy(PJRT_Event_Destroy_Args* args); struct PJRT_Event_IsReady_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Event* event; bool is_ready; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_IsReady_Args, is_ready); typedef PJRT_Error* PJRT_Event_IsReady(PJRT_Event_IsReady_Args* args); struct PJRT_Event_Error_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Event* event; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Error_Args, event); typedef PJRT_Error* PJRT_Event_Error(PJRT_Event_Error_Args* args); struct PJRT_Event_Await_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Event* event; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Await_Args, event); typedef PJRT_Error* PJRT_Event_Await(PJRT_Event_Await_Args* args); typedef void (*PJRT_Event_OnReadyCallback)(PJRT_Error* error, void* user_arg); struct PJRT_Event_OnReady_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Event* event; PJRT_Event_OnReadyCallback callback; void* user_arg; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_OnReady_Args, user_arg); typedef PJRT_Error* PJRT_Event_OnReady(PJRT_Event_OnReady_Args* args); typedef struct PJRT_Client PJRT_Client; typedef struct PJRT_Device PJRT_Device; typedef struct PJRT_Memory PJRT_Memory; typedef struct PJRT_DeviceDescription PJRT_DeviceDescription; typedef struct PJRT_TopologyDescription PJRT_TopologyDescription; typedef struct PJRT_Executable PJRT_Executable; typedef struct PJRT_LoadedExecutable PJRT_LoadedExecutable; typedef struct PJRT_Buffer PJRT_Buffer; typedef void (*PJRT_KeyValueGetCallback_ValueDeleter)(char* value); struct PJRT_KeyValueGetCallback_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const char* key; size_t key_size; int timeout_in_ms; PJRT_CallbackError* callback_error; void* user_arg; char* value; size_t value_size; PJRT_KeyValueGetCallback_ValueDeleter value_deleter_callback; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValueGetCallback_Args, value_deleter_callback); typedef PJRT_Error* (*PJRT_KeyValueGetCallback)( PJRT_KeyValueGetCallback_Args* args); struct PJRT_KeyValuePutCallback_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const char* key; size_t key_size; const char* value; size_t value_size; PJRT_CallbackError* callback_error; void* user_arg; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValuePutCallback_Args, user_arg); typedef PJRT_Error* (*PJRT_KeyValuePutCallback)( PJRT_KeyValuePutCallback_Args* args); struct PJRT_Client_Create_Args { size_t struct_size; PJRT_Extension_Base* extension_start; const PJRT_NamedValue* create_options; size_t num_options; PJRT_KeyValueGetCallback kv_get_callback; void* kv_get_user_arg; PJRT_KeyValuePutCallback kv_put_callback; void* kv_put_user_arg; PJRT_Client* client; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Create_Args, client); typedef PJRT_Error* PJRT_Client_Create(PJRT_Client_Create_Args* args); struct PJRT_Client_Destroy_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Destroy_Args, client); typedef PJRT_Error* PJRT_Client_Destroy(PJRT_Client_Destroy_Args* args); 
struct PJRT_Client_PlatformName_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; const char* platform_name; size_t platform_name_size; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformName_Args, platform_name_size); typedef PJRT_Error* PJRT_Client_PlatformName( PJRT_Client_PlatformName_Args* args); struct PJRT_Client_ProcessIndex_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; int process_index; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_ProcessIndex_Args, process_index); typedef PJRT_Error* PJRT_Client_ProcessIndex( PJRT_Client_ProcessIndex_Args* args); struct PJRT_Client_PlatformVersion_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; const char* platform_version; size_t platform_version_size; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformVersion_Args, platform_version_size); typedef PJRT_Error* PJRT_Client_PlatformVersion( PJRT_Client_PlatformVersion_Args* args); struct PJRT_Client_TopologyDescription_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; PJRT_TopologyDescription* topology; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_TopologyDescription_Args, topology); typedef PJRT_Error* PJRT_Client_TopologyDescription( PJRT_Client_TopologyDescription_Args* args); struct PJRT_Client_Devices_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; PJRT_Device* const* devices; size_t num_devices; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Devices_Args, num_devices); typedef PJRT_Error* PJRT_Client_Devices(PJRT_Client_Devices_Args* args); struct PJRT_Client_AddressableDevices_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; PJRT_Device* const* addressable_devices; size_t num_addressable_devices; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableDevices_Args, num_addressable_devices); typedef PJRT_Error* PJRT_Client_AddressableDevices( PJRT_Client_AddressableDevices_Args* args); struct PJRT_Client_LookupDevice_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; int id; PJRT_Device* device; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupDevice_Args, device); typedef PJRT_Error* PJRT_Client_LookupDevice( PJRT_Client_LookupDevice_Args* args); struct PJRT_Client_LookupAddressableDevice_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; int local_hardware_id; PJRT_Device* addressable_device; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupAddressableDevice_Args, addressable_device); typedef PJRT_Error* PJRT_Client_LookupAddressableDevice( PJRT_Client_LookupAddressableDevice_Args* args); struct PJRT_Client_AddressableMemories_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; PJRT_Memory* const* addressable_memories; size_t num_addressable_memories; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableMemories_Args, num_addressable_memories); typedef PJRT_Error* PJRT_Client_AddressableMemories( PJRT_Client_AddressableMemories_Args* args); struct PJRT_Program { size_t struct_size; PJRT_Extension_Base* extension_start; char* code; size_t code_size; const char* format; size_t format_size; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Program, format_size); struct PJRT_Client_Compile_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; const PJRT_Program* program; const char* compile_options; size_t compile_options_size; PJRT_LoadedExecutable* executable; }; 
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Compile_Args, executable); typedef PJRT_Error* PJRT_Client_Compile(PJRT_Client_Compile_Args* args); struct PJRT_Client_DefaultDeviceAssignment_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; int num_replicas; int num_partitions; size_t default_assignment_size; int* default_assignment; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_DefaultDeviceAssignment_Args, default_assignment); typedef PJRT_Error* PJRT_Client_DefaultDeviceAssignment( PJRT_Client_DefaultDeviceAssignment_Args* args); typedef enum { PJRT_Buffer_Type_INVALID, PJRT_Buffer_Type_PRED, PJRT_Buffer_Type_S8, PJRT_Buffer_Type_S16, PJRT_Buffer_Type_S32, PJRT_Buffer_Type_S64, PJRT_Buffer_Type_U8, PJRT_Buffer_Type_U16, PJRT_Buffer_Type_U32, PJRT_Buffer_Type_U64, PJRT_Buffer_Type_F16, PJRT_Buffer_Type_F32, PJRT_Buffer_Type_F64, PJRT_Buffer_Type_BF16, PJRT_Buffer_Type_C64, PJRT_Buffer_Type_C128, PJRT_Buffer_Type_F8E5M2, PJRT_Buffer_Type_F8E4M3FN, PJRT_Buffer_Type_F8E4M3B11FNUZ, PJRT_Buffer_Type_F8E5M2FNUZ, PJRT_Buffer_Type_F8E4M3FNUZ, PJRT_Buffer_Type_S4, PJRT_Buffer_Type_U4, PJRT_Buffer_Type_TOKEN, PJRT_Buffer_Type_S2, PJRT_Buffer_Type_U2, } PJRT_Buffer_Type; typedef enum { PJRT_HostBufferSemantics_kImmutableOnlyDuringCall, PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes, PJRT_HostBufferSemantics_kImmutableZeroCopy, PJRT_HostBufferSemantics_kMutableZeroCopy, } PJRT_HostBufferSemantics; typedef enum { PJRT_Buffer_MemoryLayout_Type_Tiled = 0, PJRT_Buffer_MemoryLayout_Type_Strides, } PJRT_Buffer_MemoryLayout_Type; struct PJRT_Buffer_MemoryLayout_Tiled { size_t struct_size; PJRT_Extension_Base* extension_start; const int64_t* minor_to_major; size_t minor_to_major_size; const int64_t* tile_dims; const size_t* tile_dim_sizes; size_t num_tiles; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Tiled, num_tiles); struct PJRT_Buffer_MemoryLayout_Strides { size_t struct_size; PJRT_Extension_Base* extension_start; const int64_t* byte_strides; size_t num_byte_strides; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Strides, num_byte_strides); struct PJRT_Buffer_MemoryLayout { size_t struct_size; PJRT_Extension_Base* extension_start; union { PJRT_Buffer_MemoryLayout_Tiled tiled; PJRT_Buffer_MemoryLayout_Strides strides; }; PJRT_Buffer_MemoryLayout_Type type; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout, type); struct PJRT_Client_BufferFromHostBuffer_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; const void* data; PJRT_Buffer_Type type; const int64_t* dims; size_t num_dims; const int64_t* byte_strides; size_t num_byte_strides; PJRT_HostBufferSemantics host_buffer_semantics; PJRT_Device* device; PJRT_Memory* memory; PJRT_Buffer_MemoryLayout* device_layout; PJRT_Event* done_with_host_buffer; PJRT_Buffer* buffer; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_BufferFromHostBuffer_Args, buffer); typedef PJRT_Error* PJRT_Client_BufferFromHostBuffer( PJRT_Client_BufferFromHostBuffer_Args* args); struct PJRT_Client_CreateViewOfDeviceBuffer_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_Client* client; void* device_buffer_ptr; const int64_t* dims; size_t num_dims; PJRT_Buffer_Type element_type; PJRT_Buffer_MemoryLayout* layout; PJRT_Device* device; void (*on_delete_callback)(void* device_buffer_ptr, void* user_arg); void* on_delete_callback_arg; intptr_t stream; PJRT_Buffer* buffer; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_CreateViewOfDeviceBuffer_Args, buffer); typedef PJRT_Error* 
PJRT_Client_CreateViewOfDeviceBuffer( PJRT_Client_CreateViewOfDeviceBuffer_Args* args); struct PJRT_DeviceDescription_Id_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_DeviceDescription* device_description; int id; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Id_Args, id); typedef PJRT_Error* PJRT_DeviceDescription_Id( PJRT_DeviceDescription_Id_Args* args); struct PJRT_DeviceDescription_ProcessIndex_Args { size_t struct_size; PJRT_Extension_Base* extension_start; PJRT_DeviceDescription* device_description; int process_index; }; PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_ProcessIndex_Args, process_index);
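Calling-convention sketch (illustrative, not part of the header above): every entry point takes a single args struct whose struct_size is set from the generated *_STRUCT_SIZE enum and whose extension_start is zeroed, and a null PJRT_Error* return means success. The api and client values below are assumptions standing in for pointers obtained from a loaded plugin; the PJRT_Api function-pointer table itself is declared later in the header and is not shown in this excerpt.

// Sketch only: `api` (const PJRT_Api*) and `client` (PJRT_Client*) are
// assumed to come from a loaded plugin.
PJRT_Client_ProcessIndex_Args args;
args.struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE;
args.extension_start = nullptr;  // no extension structs chained in
args.client = client;
PJRT_Error* error = api->PJRT_Client_ProcessIndex(&args);
if (error != nullptr) {
  // On failure, inspect the error via PJRT_Error_GetCode /
  // PJRT_Error_Message, then release it with PJRT_Error_Destroy.
  PJRT_Error_Destroy_Args destroy_args;
  destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
  destroy_args.extension_start = nullptr;
  destroy_args.error = error;
  api->PJRT_Error_Destroy(&destroy_args);
} else {
  // args.process_index now holds the client's process index
  // (0 in single-process use, per the tests below).
}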
#include "xla/pjrt/c/pjrt_c_api_test.h" #include <cstddef> #include <functional> #include <memory> #include <numeric> #include <string> #include <string_view> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/base/thread_annotations.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "xla/client/executable_build_options.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/pjrt/c/pjrt_c_api.h" #include "xla/pjrt/c/pjrt_c_api_helpers.h" #include "xla/pjrt/c/pjrt_c_api_test_base.h" #include "xla/pjrt/compile_options.pb.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/pjrt_future.h" #include "xla/service/computation_placer.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_parser.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/literal_test_util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status.h" namespace pjrt { namespace { constexpr absl::string_view module_add_one = R"(module { func.func @main(%arg0: tensor<f32>) -> tensor<f32> { %0 = "mhlo.copy"(%arg0) : (tensor<f32>) -> tensor<f32> %1 = mhlo.constant dense<1.000000e+00> : tensor<f32> %2 = mhlo.add %0, %1 : tensor<f32> return %2 : tensor<f32> }})"; constexpr absl::string_view kHloString = R"( HloModule TupleCreate_module: ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) { %v1 = f32[] parameter(0) %v2 = f32[3]{0} parameter(1) %v3 = f32[2,3]{1,0} parameter(2) ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3) } )"; class TestCApiFactory { public: void Register(std::function<const PJRT_Api*()> factory, absl::string_view platform_name) { absl::MutexLock lock(&mu_); CHECK(!factory_); factory_ = std::move(factory); CHECK(platform_name_.empty()) << "Platform name already provided"; CHECK(!platform_name.empty()) << "Provided platform name is empty"; platform_name_ = platform_name; } std::function<const PJRT_Api*()> Get() const { absl::MutexLock lock(&mu_); CHECK(factory_) << "Test didn't call RegisterPjRtCApiTestFactory()"; return factory_; } std::string GetPlatformName() const { absl::MutexLock lock(&mu_); CHECK(!platform_name_.empty()) << "Test didn't call RegisterPjRtCApiTestFactory()"; return platform_name_; } private: mutable absl::Mutex mu_; std::function<const PJRT_Api*()> factory_ ABSL_GUARDED_BY(mu_); std::string platform_name_; }; TestCApiFactory& GetGlobalTestCApiFactory() { static auto* const factory = new TestCApiFactory; return *factory; } const PJRT_Api* GetCApi() { return GetGlobalTestCApiFactory().Get()(); } std::string GetPlatformName() { return GetGlobalTestCApiFactory().GetPlatformName(); } } void RegisterPjRtCApiTestFactory(std::function<const PJRT_Api*()> factory, absl::string_view platform_name) { GetGlobalTestCApiFactory().Register(std::move(factory), platform_name); } namespace { class PjrtCApiTest : public PjrtCApiTestBase { protected: PjrtCApiTest() : PjrtCApiTestBase(GetCApi()) {} std::string platform_name_ = GetPlatformName(); }; TEST_F(PjrtCApiTest, ApiVersion) { CHECK_EQ(api_->pjrt_api_version.major_version, PJRT_API_MAJOR); CHECK_EQ(api_->pjrt_api_version.minor_version, PJRT_API_MINOR); } TEST_F(PjrtCApiTest, PlatformName) { PJRT_Client_PlatformName_Args 
args; args.client = client_; args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE; args.extension_start = nullptr; PJRT_Error* error = api_->PJRT_Client_PlatformName(&args); ASSERT_EQ(error, nullptr); absl::string_view platform_name(args.platform_name, args.platform_name_size); ASSERT_EQ(platform_name_, platform_name); } TEST_F(PjrtCApiTest, ClientProcessIndex) { PJRT_Client_ProcessIndex_Args process_index_args = PJRT_Client_ProcessIndex_Args{ .struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .process_index = -1, }; PJRT_Error* error = api_->PJRT_Client_ProcessIndex(&process_index_args); CHECK_EQ(error, nullptr); CHECK_EQ(process_index_args.process_index, 0); } TEST_F(PjrtCApiTest, ClientDevices) { absl::Span<PJRT_Device* const> devices = GetClientDevices(); ASSERT_FALSE(devices.empty()); for (auto& device : devices) { ASSERT_TRUE(this->IsValidDeviceId(device)); } } TEST_F(PjrtCApiTest, ClientAddressableDevices) { absl::Span<PJRT_Device* const> addressable_devices = GetClientAddressableDevices(); ASSERT_FALSE(addressable_devices.empty()); for (auto& device : addressable_devices) { ASSERT_TRUE(this->IsValidDeviceId(device)); } absl::Span<PJRT_Device* const> client_devices = GetClientDevices(); for (auto& addressable_device : addressable_devices) { ASSERT_THAT(client_devices, ::testing::Contains(addressable_device)); } } TEST_F(PjrtCApiTest, LookupDevice) { PJRT_Client_LookupDevice_Args lookup_device_args = PJRT_Client_LookupDevice_Args{ .struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .id = 0, .device = nullptr, }; PJRT_Error* lookup_device_error = api_->PJRT_Client_LookupDevice(&lookup_device_args); ASSERT_EQ(lookup_device_error, nullptr); int id = GetDeviceId(lookup_device_args.device); ASSERT_EQ(id, 0); } TEST_F(PjrtCApiTest, LookupAddressableDevice) { PJRT_Client_LookupAddressableDevice_Args lookup_addressable_device_args = PJRT_Client_LookupAddressableDevice_Args{ .struct_size = PJRT_Client_LookupAddressableDevice_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .local_hardware_id = 0, .addressable_device = nullptr, }; PJRT_Error* lookup_addressable_device_error = api_->PJRT_Client_LookupAddressableDevice( &lookup_addressable_device_args); ASSERT_EQ(lookup_addressable_device_error, nullptr); int local_hardware_id = GetLocalHardwareId(lookup_addressable_device_args.addressable_device); ASSERT_EQ(local_hardware_id, 0); } TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentNominal) { constexpr int kNumReplicas = 2; constexpr int kNumPartitions = 1; std::vector<int> assignment_buffer(kNumReplicas * kNumPartitions); PJRT_Client_DefaultDeviceAssignment_Args args{ .struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .num_replicas = kNumReplicas, .num_partitions = kNumPartitions, .default_assignment_size = assignment_buffer.size(), .default_assignment = assignment_buffer.data(), }; auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args)); EXPECT_EQ(error, nullptr); } TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentBufferTooSmall) { constexpr int kNumReplicas = 4; constexpr int kNumPartitions = 2; constexpr size_t kBufferSize = 7; std::vector<int> assignment_buffer(kBufferSize); PJRT_Client_DefaultDeviceAssignment_Args args{ .struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .num_replicas = kNumReplicas, .num_partitions = 
kNumPartitions, .default_assignment_size = assignment_buffer.size(), .default_assignment = assignment_buffer.data(), }; auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args)); ASSERT_NE(error, nullptr); absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_); EXPECT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_EQ(status.message(), "PJRT_Client_DefaultDeviceAssignment: `default_assignment_size` 7" " < `num_replicas * num_partitions`, 4 * 2 = 8"); } TEST_F(PjrtCApiTest, LookupDeviceNegativeId) { PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{ .struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .id = -1, .device = nullptr, }; absl::Status expected = absl::Status(absl::StatusCode::kInvalidArgument, "No matching device found for device_id -1"); auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args)); ASSERT_NE(error, nullptr); absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_); ASSERT_EQ(status, expected); } TEST_F(PjrtCApiTest, LookupDeviceOutOfRangeId) { int out_of_range_id = GetNumDevices(); PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{ .struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, .id = out_of_range_id, .device = nullptr, }; absl::Status expected = absl::Status( absl::StatusCode::kInvalidArgument, absl::StrCat("No matching device found for device_id ", out_of_range_id)); auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args)); ASSERT_NE(error, nullptr); absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_); ASSERT_EQ(status, expected); } static constexpr std::string_view kExecutableName = "operation"; void destroy_executable(PJRT_LoadedExecutable* executable, const PJRT_Api* api) { PJRT_LoadedExecutable_Destroy_Args args{ .struct_size = PJRT_LoadedExecutable_Destroy_Args_STRUCT_SIZE, .extension_start = nullptr, .executable = executable, }; PJRT_Error* error = api->PJRT_LoadedExecutable_Destroy(&args); CHECK_EQ(error, nullptr); } TEST_F(PjrtCApiTest, BufferTransferImmutableUntilTransferCompletes) { xla::Shape shape = xla::ShapeUtil::MakeShapeWithType<float>({4}); std::vector<float> float_data(4); std::iota(float_data.begin(), float_data.end(), 41.0f); PJRT_Client_BufferFromHostBuffer_Args args = CreateBufferFromHostBufferArgs( float_data, shape, xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes); PJRT_Error* error = api_->PJRT_Client_BufferFromHostBuffer(&args); CHECK_EQ(error, nullptr); std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer( args.buffer, ::pjrt::MakeBufferDeleter(api_)); std::unique_ptr<PJRT_Event, ::pjrt::PJRT_EventDeleter> event( args.done_with_host_buffer, ::pjrt::MakeEventDeleter(api_)); PJRT_Event_Await_Args await_args; await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE; await_args.extension_start = nullptr; await_args.event = event.get(); PJRT_Error* event_error = api_->PJRT_Event_Await(&await_args); ASSERT_EQ(event_error, nullptr); } TEST_F(PjrtCApiTest, Compile) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format(::pjrt::kMlirFormat); std::string program_code{module_add_one}; PJRT_Program program = 
PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = program_code.data(), .code_size = program_code.length(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); ::pjrt::LogFatalIfPjrtError(error, api_); ASSERT_EQ(error, nullptr); destroy_executable(args.executable, api_); } TEST_F(PjrtCApiTest, CompileXlaComputation) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; xla::DeviceAssignment device_assignment(1, 1); device_assignment(0, 0) = 0; xla::DeviceAssignmentProto proto; device_assignment.Serialize(&proto); std::string device_assignment_str = proto.SerializeAsString(); std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module = xla::ParseAndReturnUnverifiedModule(kHloString); ASSERT_EQ(hlo_module.ok(), true); std::string module_str = hlo_module->get()->ToProto().SerializeAsString(); std::string format(::pjrt::kHloFormat); PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = module_str.data(), .code_size = module_str.size(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); ::pjrt::LogFatalIfPjrtError(error, api_); ASSERT_EQ(error, nullptr); destroy_executable(args.executable, api_); } TEST_F(PjrtCApiTest, CompileInvalidOption) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; std::string options_str = "invalid compile options"; args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format(::pjrt::kMlirFormat); std::string program_code{module_add_one}; PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = program_code.data(), .code_size = program_code.length(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_); EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "PJRT_Client_Compile: failed to deserialize CompileOptionsProto"); destroy_executable(args.executable, api_); ::pjrt::MakeErrorDeleter(api_)(error); } TEST_F(PjrtCApiTest, CompileInvalidProgramFormat) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; xla::DeviceAssignment device_assignment(1, 1); device_assignment(0, 0) = 0; xla::DeviceAssignmentProto proto; device_assignment.Serialize(&proto); std::string device_assignment_str = proto.SerializeAsString(); std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format("invalid"); PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = nullptr, .code_size = 0, .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = 
api_->PJRT_Client_Compile(&args); absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_); EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "Unknown program format 'invalid'."); destroy_executable(args.executable, api_); ::pjrt::MakeErrorDeleter(api_)(error); } TEST_F(PjrtCApiTest, DeviceId) { auto* device = GetClientDevices()[0]; int id = GetDeviceId(device); CHECK_EQ(id, 0); } TEST_F(PjrtCApiTest, DeviceProcessIndex) { PJRT_DeviceDescription_ProcessIndex_Args args = PJRT_DeviceDescription_ProcessIndex_Args{ .struct_size = PJRT_DeviceDescription_ProcessIndex_Args_STRUCT_SIZE, .extension_start = nullptr, .device_description = ::pjrt::GetDeviceDescription(api_, GetClientDevices()[0]), .process_index = -1, }; PJRT_Error* error = api_->PJRT_DeviceDescription_ProcessIndex(&args); ASSERT_EQ(error, nullptr); CHECK_EQ(args.process_index, 0); } TEST_F(PjrtCApiTest, DeviceIsAddressable) { PJRT_Device_IsAddressable_Args args = PJRT_Device_IsAddressable_Args{ .struct_size = PJRT_Device_IsAddressable_Args_STRUCT_SIZE, .extension_start = nullptr, .device = GetClientDevices()[0], .is_addressable = false, }; PJRT_Error* error = api_->PJRT_Device_IsAddressable(&args); ASSERT_EQ(error, nullptr); CHECK_EQ(args.is_addressable, true); } TEST_F(PjrtCApiTest, DeviceLocalHardwareId) { PJRT_Device_LocalHardwareId_Args args = PJRT_Device_LocalHardwareId_Args{ .struct_size = PJRT_Device_LocalHardwareId_Args_STRUCT_SIZE, .extension_start = nullptr, .device = GetClientDevices()[0], .local_hardware_id = -1, }; PJRT_Error* error = api_->PJRT_Device_LocalHardwareId(&args); ASSERT_EQ(error, nullptr); CHECK_EQ(args.local_hardware_id, 0); } class PjrtCApiBufferTest : public PjrtCApiTest { protected: void SetUp() override { PjrtCApiTest::SetUp(); auto buffer_and_event = create_buffer(); buffer_ = std::move(buffer_and_event.first); event_ = buffer_and_event.second; } void TearDown() override { TF_CHECK_OK(event_.Await()); buffer_.reset(nullptr); PjrtCApiTest::TearDown(); } std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer_; xla::PjRtFuture<> event_; }; TEST_F(PjrtCApiBufferTest, IsDeleted) { PJRT_Buffer_IsDeleted_Args is_deleted_args; is_deleted_args.struct_size = PJRT_Buffer_IsDeleted_Args_STRUCT_SIZE; is_deleted_args.extension_start = nullptr; is_deleted_args.buffer = buffer_.get(); PJRT_Error* is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args); ASSERT_EQ(is_deleted_error, nullptr); ASSERT_FALSE(is_deleted_args.is_deleted); PJRT_Buffer_Delete_Args delete_args; delete_args.struct_size = PJRT_Buffer_Delete_Args_STRUCT_SIZE; delete_args.extension_start = nullptr; delete_args.buffer = buffer_.get(); PJRT_Error* delete_error = api_->PJRT_Buffer_Delete(&delete_args); ASSERT_EQ(delete_error, nullptr); is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args); ASSERT_EQ(is_deleted_error, nullptr); ASSERT_TRUE(is_deleted_args.is_deleted); } TEST_F(PjrtCApiBufferTest, GetOnDeviceSizeInBytes) { PJRT_Buffer_OnDeviceSizeInBytes_Args args; args.struct_size = PJRT_Buffer_OnDeviceSizeInBytes_Args_STRUCT_SIZE; args.extension_start = nullptr; args.buffer = buffer_.get(); PJRT_Error* on_device_size_bytes_error = api_->PJRT_Buffer_OnDeviceSizeInBytes(&args); ASSERT_EQ(on_device_size_bytes_error, nullptr); ASSERT_GT(args.on_device_size_in_bytes, 0); } TEST_F(PjrtCApiBufferTest, ReadyEvent) { PJRT_Buffer_ReadyEvent_Args get_event_args; get_event_args.struct_size = PJRT_Buffer_ReadyEvent_Args_STRUCT_SIZE; get_event_args.extension_start = nullptr; 
get_event_args.buffer = buffer_.get(); auto error = ToUniquePtr(api_->PJRT_Buffer_ReadyEvent(&get_event_args)); ASSERT_EQ(error, nullptr); PJRT_Event* event = get_event_args.event; ASSERT_NE(event, nullptr); PJRT_Event_Await_Args await_args; await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE; await_args.extension_start = nullptr; await_args.event = event; error.reset(api_->PJRT_Event_Await(&await_args)); ASSERT_EQ(error, nullptr); PJRT_Event_IsReady_Args ready_args; ready_args.struct_size = PJRT_Event_IsReady_Args_STRUCT_SIZE; ready_args.extension_start = nullptr; ready_args.event = event; error.reset(api_->PJRT_Event_IsReady(&ready_args)); ASSERT_EQ(error, nullptr); EXPECT_TRUE(ready_args.is_ready); PJRT_Event_Destroy_Args destroy_args; destroy_args.struct_size = PJRT_Event_Destroy_Args_STRUCT_SIZE; destroy_args.extension_start = nullptr; destroy_args.event = event; error.reset(api_->PJRT_Event_Destroy(&destroy_args)); EXPECT_EQ(error, nullptr); } TEST_F(PjrtCApiBufferTest, ToHostBufferNoHostLayout) { PJRT_Buffer_ToHostBuffer_Args args; args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE; args.extension_start = nullptr; args.src = buffer_.get(); xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4}); auto literal = std::make_shared<xla::Literal>(host_shape); args.host_layout = nullptr; args.dst = literal->untyped_data(); args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape); args.event = nullptr; PJRT_Error* error = api_->PJRT_Buffer_ToHostBuffer(&args); xla::PjRtFuture<> transfer_to_host = ::pjrt::ConvertCEventToCppFuture(args.event, api_); TF_CHECK_OK(transfer_to_host.Await()); EXPECT_EQ(error, nullptr); ASSERT_EQ(literal->data<float>().size(), 4); std::vector<float> float_data(4); std::iota(float_data.begin(), float_data.end(), 41.0f); EXPECT_TRUE(xla::LiteralTestUtil::Equal( xla::LiteralUtil::CreateR1<float>(float_data), *literal)); } TEST_F(PjrtCApiBufferTest, IncreaseAndDecreaseReferenceCount) { PJRT_Buffer_IncreaseExternalReferenceCount_Args increase_reference_count_args; increase_reference_count_args.struct_size = PJRT_Buffer_IncreaseExternalReferenceCount_Args_STRUCT_SIZE; increase_reference_count_args.extension_start = nullptr; increase_reference_count_args.buffer = buffer_.get(); PJRT_Error* increase_reference_count_error = api_->PJRT_Buffer_IncreaseExternalReferenceCount( &increase_reference_count_args); EXPECT_EQ(increase_reference_count_error, nullptr); PJRT_Buffer_DecreaseExternalReferenceCount_Args decrease_reference_count_args; decrease_reference_count_args.struct_size = PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE; decrease_reference_count_args.extension_start = nullptr; decrease_reference_count_args.buffer = buffer_.get(); PJRT_Error* decrease_reference_error = api_->PJRT_Buffer_DecreaseExternalReferenceCount( &decrease_reference_count_args); EXPECT_EQ(decrease_reference_error, nullptr); } TEST_F(PjrtCApiBufferTest, DecreaseReferenceCountReturnsError) { PJRT_Buffer_DecreaseExternalReferenceCount_Args args; args.struct_size = PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE; args.extension_start = nullptr; args.buffer = buffer_.get(); auto error = ToUniquePtr(api_->PJRT_Buffer_DecreaseExternalReferenceCount(&args)); ASSERT_NE(error, nullptr); absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_); EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "Attempting to decrease reference on a buffer with zero reference " "count."); } TEST_F(PjrtCApiBufferTest, 
OpaqueDeviceMemoryDataPointer) { PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args args; args.struct_size = PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE; args.extension_start = nullptr; args.buffer = buffer_.get(); PJRT_Error* error = api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&args); EXPECT_EQ(error, nullptr); EXPECT_NE(args.device_memory_ptr, nullptr); } class PjrtCommonCApiHelpersTest : public PjrtCApiTest {}; TEST_F(PjrtCommonCApiHelpersTest, PjrtErrorToStatus) { EXPECT_TRUE(::pjrt::PjrtErrorToStatus(nullptr, api_).ok()); } } }
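The tests above are platform-agnostic: a concrete plugin links them in and supplies its API through RegisterPjRtCApiTestFactory before the tests run. A hypothetical registration stub follows; the GetPjrtApi symbol and the "cpu" platform name are assumptions, not from the source.

#include "xla/pjrt/c/pjrt_c_api_test.h"
extern "C" const PJRT_Api* GetPjrtApi();  // assumed plugin entry point
namespace {
// Runs at static-initialization time, before RUN_ALL_TESTS.
const bool kFactoryRegistered = [] {
  pjrt::RegisterPjRtCApiTestFactory([] { return GetPjrtApi(); },
                                    /*platform_name=*/"cpu");
  return true;
}();
}  // namespace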
typedef PJRT_Error* PJRT_Client_Compile(PJRT_Client_Compile_Args* args);

struct PJRT_Client_DefaultDeviceAssignment_Args {
  size_t struct_size;
  PJRT_Extension_Base* extension_start;
  PJRT_Client* client;
  int num_replicas;
  int num_partitions;
  size_t default_assignment_size;
  int* default_assignment;
};
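For reference, a sketch of what the traits macro provides for the struct above: the *_STRUCT_SIZE enum measures the struct up to and including its declared last field, which is how callers and plugins built against different header revisions negotiate through args->struct_size. The static_assert below is illustrative and simply mirrors the PJRT_STRUCT_SIZE definition; it is not part of the source.

static_assert(
    PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE ==
        offsetof(PJRT_Client_DefaultDeviceAssignment_Args,
                 default_assignment) +
            sizeof(((PJRT_Client_DefaultDeviceAssignment_Args*)0)
                       ->default_assignment),
    "STRUCT_SIZE counts bytes up to and including the last field");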
TEST_F(PjrtCApiTest, Compile) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format(::pjrt::kMlirFormat); std::string program_code{module_add_one}; PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = program_code.data(), .code_size = program_code.length(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); ::pjrt::LogFatalIfPjrtError(error, api_); ASSERT_EQ(error, nullptr); destroy_executable(args.executable, api_); } TEST_F(PjrtCApiTest, CompileXlaComputation) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; xla::DeviceAssignment device_assignment(1, 1); device_assignment(0, 0) = 0; xla::DeviceAssignmentProto proto; device_assignment.Serialize(&proto); std::string device_assignment_str = proto.SerializeAsString(); std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module = xla::ParseAndReturnUnverifiedModule(kHloString); ASSERT_EQ(hlo_module.ok(), true); std::string module_str = hlo_module->get()->ToProto().SerializeAsString(); std::string format(::pjrt::kHloFormat); PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = module_str.data(), .code_size = module_str.size(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); ::pjrt::LogFatalIfPjrtError(error, api_); ASSERT_EQ(error, nullptr); destroy_executable(args.executable, api_); } TEST_F(PjrtCApiTest, CompileInvalidOption) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; std::string options_str = "invalid compile options"; args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format(::pjrt::kMlirFormat); std::string program_code{module_add_one}; PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = program_code.data(), .code_size = program_code.length(), .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_); EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "PJRT_Client_Compile: failed to deserialize CompileOptionsProto"); destroy_executable(args.executable, api_); ::pjrt::MakeErrorDeleter(api_)(error); } TEST_F(PjrtCApiTest, CompileInvalidProgramFormat) { PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{ .struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE, .extension_start = nullptr, .client = client_, }; xla::DeviceAssignment device_assignment(1, 1); device_assignment(0, 0) = 0; xla::DeviceAssignmentProto proto; device_assignment.Serialize(&proto); std::string device_assignment_str = 
proto.SerializeAsString(); std::string options_str = BuildSingleDeviceCompileOptionStr(); args.compile_options = options_str.c_str(); args.compile_options_size = options_str.size(); std::string format("invalid"); PJRT_Program program = PJRT_Program{ .struct_size = PJRT_Program_STRUCT_SIZE, .extension_start = nullptr, .code = nullptr, .code_size = 0, .format = format.c_str(), .format_size = format.size(), }; args.program = &program; PJRT_Error* error = api_->PJRT_Client_Compile(&args); absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_); EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "Unknown program format 'invalid'."); destroy_executable(args.executable, api_); ::pjrt::MakeErrorDeleter(api_)(error); }
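A note on the compile_options payload used in these tests: the error path checks for "failed to deserialize CompileOptionsProto", so the string is expected to be a serialized xla::CompileOptionsProto. A minimal sketch, assuming BuildSingleDeviceCompileOptionStr (defined in the test base, not shown here) does roughly the following:

xla::CompileOptionsProto proto;
// ... populate build options, e.g. a 1x1 device assignment ...
std::string options_str = proto.SerializeAsString();
// options_str then feeds args.compile_options / args.compile_options_size.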
#include "tensorflow/tools/graph_transforms/transform_graph.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/lib/strings/scanner.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/command_line_flags.h" #include "tensorflow/tools/graph_transforms/file_utils.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" #if !defined(PLATFORM_WINDOWS) #include <pwd.h> #include <unistd.h> #endif namespace tensorflow { namespace graph_transforms { using tensorflow::strings::Scanner; Status ParseTransformParameters(const string& transforms_string, TransformParameters* params_list) { params_list->clear(); enum { TRANSFORM_NAME, TRANSFORM_PARAM_NAME, TRANSFORM_PARAM_VALUE, } state = TRANSFORM_NAME; StringPiece remaining(transforms_string); StringPiece match; StringPiece transform_name; StringPiece parameter_name; StringPiece parameter_value; TransformFuncParameters func_parameters; while (!remaining.empty()) { if (state == TRANSFORM_NAME) { func_parameters.clear(); Scanner(remaining).AnySpace().GetResult(&remaining, &match); if (remaining.empty()) { return OkStatus(); } const bool found_transform_name = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &transform_name); if (!found_transform_name) { return errors::InvalidArgument("Looking for transform name, but found ", string(remaining).c_str()); } if (Scanner(remaining).OneLiteral("(").GetResult(&remaining, &match)) { state = TRANSFORM_PARAM_NAME; } else { params_list->push_back({string(transform_name), func_parameters}); transform_name = ""; state = TRANSFORM_NAME; } } else if (state == TRANSFORM_PARAM_NAME) { if (Scanner(remaining).OneLiteral(")").GetResult(&remaining, &match)) { params_list->push_back({string(transform_name), func_parameters}); transform_name = ""; state = TRANSFORM_NAME; } else { Scanner(remaining).ZeroOrOneLiteral(",").GetResult(&remaining, &match); Scanner(remaining).AnySpace().GetResult(&remaining, &match); const bool found_parameter_name = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &parameter_name); if (!found_parameter_name) { return errors::InvalidArgument( "Looking for parameter name, but found ", string(remaining).c_str()); } if (Scanner(remaining).OneLiteral("=").GetResult(&remaining, &match)) { state = TRANSFORM_PARAM_VALUE; } else { return errors::InvalidArgument("Looking for =, but found ", string(remaining).c_str()); } } } else if (state == TRANSFORM_PARAM_VALUE) { bool found_parameter_value; if (Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match)) { found_parameter_value = Scanner(remaining).ScanEscapedUntil('"').GetResult( &remaining, &parameter_value); if (found_parameter_value) { Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match); } } else { found_parameter_value = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE) .GetResult(&remaining, &parameter_value); } if (!found_parameter_value) { return errors::InvalidArgument("Looking for parameter name, but found ", string(remaining).c_str()); } func_parameters[string(parameter_name)].emplace_back(parameter_value); Scanner(remaining).ZeroOrOneLiteral("\"").GetResult(&remaining, &match); Scanner(remaining).ZeroOrOneLiteral("'").GetResult(&remaining, &match); state = TRANSFORM_PARAM_NAME; } } return OkStatus(); } std::string ExpandPath(const 
std::string& path_string) { #if defined(PLATFORM_WINDOWS) return path_string; #else if (path_string.empty() || path_string[0] != '~') { return path_string; } const char* home = nullptr; std::string::size_type prefix = path_string.find_first_of('/'); if (path_string.length() == 1 || prefix == 1) { home = getenv("HOME"); if (!home) { struct passwd* pw = getpwuid(getuid()); if (pw) { home = pw->pw_dir; } } } else { std::string user(path_string, 1, (prefix == std::string::npos) ? std::string::npos : prefix - 1); struct passwd* pw = getpwnam(user.c_str()); if (pw) { home = pw->pw_dir; } } if (!home) { return path_string; } string path(home); if (prefix == std::string::npos) { return path; } if (path.length() == 0 || path[path.length() - 1] != '/') { path += '/'; } path += path_string.substr(prefix + 1); return path; #endif } int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main) { string in_graph_string = ""; string out_graph_string = ""; string inputs_string = ""; string outputs_string = ""; string transforms_string = ""; bool output_as_text = false; std::vector<Flag> flag_list = { Flag("in_graph", &in_graph_string, "input graph file name"), Flag("out_graph", &out_graph_string, "output graph file name"), Flag("inputs", &inputs_string, "inputs"), Flag("outputs", &outputs_string, "outputs"), Flag("transforms", &transforms_string, "list of transforms"), Flag("output_as_text", &output_as_text, "whether to write the graph in text protobuf format"), }; string usage = Flags::Usage(argv[0], flag_list); usage += "\nTransforms are:\n"; TransformRegistry* transform_registry = GetTransformRegistry(); for (const auto& pair : *transform_registry) { usage += pair.first + "\n"; } const bool parse_result = Flags::Parse(&argc, argv, flag_list); if (init_main) { port::InitMain(argv[0], &argc, &argv); } if (!parse_result) { LOG(ERROR) << usage; return -1; } if (argc > 1) { LOG(ERROR) << "Unknown argument " << argv[1] << ".\n" << usage; return -1; } if (in_graph_string.empty()) { LOG(ERROR) << "in_graph graph can't be empty.\n" << usage; return -1; } if (out_graph_string.empty()) { LOG(ERROR) << "out_graph graph can't be empty.\n" << usage; return -1; } if (transforms_string.empty()) { LOG(ERROR) << "You must specify at least one transform.\n" << usage; return -1; } string in_graph = ExpandPath(in_graph_string); string out_graph = ExpandPath(out_graph_string); std::vector<string> inputs = str_util::Split(inputs_string, ','); std::vector<string> outputs = str_util::Split(outputs_string, ','); TransformParameters transform_params; Status parse_status = ParseTransformParameters(transforms_string, &transform_params); if (!parse_status.ok()) { LOG(ERROR) << "Failed to parse --transform argument, error was " << parse_status.message(); return -1; } if (transform_params.empty()) { LOG(ERROR) << "You must specify at least one transform.\n" << usage; return -1; } GraphDef graph_def; Status load_status = LoadTextOrBinaryGraphFile(in_graph, &graph_def); if (!load_status.ok()) { LOG(ERROR) << "Loading graph '" << in_graph_string << "' failed with " << load_status.message(); LOG(ERROR) << usage; return -1; } Status transform_result = TransformGraph(inputs, outputs, transform_params, &graph_def); if (!transform_result.ok()) { LOG(ERROR) << transform_result.message(); LOG(ERROR) << usage; return -1; } Status save_status; if (output_as_text) { save_status = WriteTextProto(Env::Default(), out_graph, graph_def); } else { save_status = WriteBinaryProto(Env::Default(), out_graph, graph_def); } if (!save_status.ok()) { 
LOG(ERROR) << "Saving graph '" << out_graph_string << "' failed with " << save_status.message(); return -1; } return 0; } Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params, bool* ignore_errors) { *ignore_errors = false; if (transform_params.count("ignore_errors") && (!transform_params.at("ignore_errors").empty())) { const string& ignore_errors_string = absl::AsciiStrToLower(transform_params.at("ignore_errors").at(0)); if (ignore_errors_string == "true") { *ignore_errors = true; } else if (ignore_errors_string == "false") { *ignore_errors = false; } else { return errors::InvalidArgument( "ignore_errors should be true or false, found ", ignore_errors_string); } } return OkStatus(); } Status TransformGraph(const std::vector<string>& inputs, const std::vector<string>& outputs, const TransformParameters& transform_params, GraphDef* graph_def) { TransformRegistry* transform_registry = GetTransformRegistry(); for (const auto& transform_info : transform_params) { const string& transform_name = transform_info.first; if (transform_name.empty()) { continue; } if (!transform_registry->count(transform_name)) { return errors::InvalidArgument("Transform '", transform_name, "' not recognized."); } LOG(INFO) << "Applying " << transform_name; const TransformFunc& transform_func = transform_registry->at(transform_name); TransformFuncContext context; context.input_names = inputs; context.output_names = outputs; context.params = transform_info.second; bool ignore_errors; TF_RETURN_IF_ERROR( ShouldIgnoreErrors(transform_info.second, &ignore_errors)); GraphDef transformed_graph_def; Status transform_result = transform_func(*graph_def, context, &transformed_graph_def); if (!transform_result.ok()) { if (ignore_errors) { LOG(ERROR) << transform_name << ": Ignoring error " << transform_result.message(); transformed_graph_def = *graph_def; } else { return transform_result; } } *transformed_graph_def.mutable_library() = graph_def->library(); TF_RETURN_IF_ERROR(IsGraphValid(transformed_graph_def)); *graph_def = transformed_graph_def; } return OkStatus(); } } }
#include "tensorflow/tools/graph_transforms/transform_graph.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params, bool* ignore_errors); namespace { Status test_empty_graph_transform(const GraphDef& graph_def, const TransformFuncContext& context, GraphDef* result) { result->Clear(); return OkStatus(); } } REGISTER_GRAPH_TRANSFORM("test_empty_graph_transform", test_empty_graph_transform); class TransformGraphTest : public ::testing::Test { protected: void TestConstantFolding() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 100; Tensor a_data(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_data, 1.0f); Output a_const = Const(root.WithOpName("a_expect_removed"), Input::Initializer(a_data)); Tensor b_data(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_data, 1.0f); Output b_const = Const(root.WithOpName("b_expect_removed"), Input::Initializer(b_data)); Output add = Add(root.WithOpName("add_expect_removed"), a_const, b_const); Output placeholder = Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT); Output mul = Mul(root.WithOpName("output_expect_remains"), add, placeholder); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); string graph_def_serialized; graph_def.SerializeToString(&graph_def_serialized); const string dir = testing::TmpDir(); const string in_filename_pb = io::JoinPath(dir, "in_graphdef.pb"); const string out_filename_pb = io::JoinPath(dir, "out_graphdef.pb"); TF_ASSERT_OK(WriteStringToFile(Env::Default(), in_filename_pb, graph_def_serialized)); std::vector<string> args = {"some_binary", "--in_graph=" + in_filename_pb, "--out_graph=" + out_filename_pb, "--inputs=placeholder_expect_remains", "--outputs=output_expect_remains", "--transforms=fold_constants"}; const int argc = 6; EXPECT_EQ(argc, args.size()); char* argv[argc]; std::vector<char*> char_strings; for (int i = 0; i < argc; ++i) { string arg = args[i]; char* char_string = new char[arg.size() + 1]; std::copy_n(arg.c_str(), arg.size() + 1, char_string); argv[i] = char_string; char_strings.push_back(char_string); } ParseFlagsAndTransformGraph(argc, argv, false); for (char* char_string : char_strings) { delete[] char_string; } GraphDef out_graph_def; TF_EXPECT_OK( ReadBinaryProto(Env::Default(), out_filename_pb, &out_graph_def)); std::map<string, const NodeDef*> out_node_map; graph_transforms::MapNamesToNodes(out_graph_def, &out_node_map); for (const NodeDef& node : out_graph_def.node()) { const int occurrence_count = out_node_map.count(node.name()); if (str_util::EndsWith(node.name(), "expect_removed")) { EXPECT_EQ(0, occurrence_count) << "node.name()=" << node.name(); } if (str_util::EndsWith(node.name(), "expect_remains")) { EXPECT_EQ(1, occurrence_count) << "node.name()=" << node.name(); } } } void TestTransformRegistration() { auto root = 
tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Output placeholder = Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); EXPECT_EQ(1, graph_def.node().size()); TF_ASSERT_OK(TransformGraph({}, {}, {{"test_empty_graph_transform", {}}}, &graph_def)); EXPECT_EQ(0, graph_def.node().size()); TF_ASSERT_OK(root.ToGraphDef(&graph_def)); Status no_such_status = TransformGraph({}, {}, {{"test_no_such_transform", {}}}, &graph_def); EXPECT_TRUE(absl::StrContains(no_such_status.ToString(), "not recognized")); } void TestParseTransformParameters() { TransformParameters params_list; TF_EXPECT_OK(ParseTransformParameters("foo", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); TF_EXPECT_OK(ParseTransformParameters("foo bar", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); TF_EXPECT_OK(ParseTransformParameters("foo() bar()", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); TF_EXPECT_OK( ParseTransformParameters("foo(bob_something=sue)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_EQ(1, params_list[0].second.count("bob_something")); EXPECT_EQ(1, params_list[0].second["bob_something"].size()); EXPECT_EQ("sue", params_list[0].second["bob_something"][0]); TF_EXPECT_OK(ParseTransformParameters("bar(a=1, b=2, a=3)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("bar", params_list[0].first); EXPECT_EQ(1, params_list[0].second.count("a")); EXPECT_EQ(2, params_list[0].second["a"].size()); EXPECT_EQ("1", params_list[0].second["a"][0]); EXPECT_EQ("3", params_list[0].second["a"][1]); EXPECT_EQ(1, params_list[0].second.count("b")); EXPECT_EQ(1, params_list[0].second["b"].size()); EXPECT_EQ("2", params_list[0].second["b"][0]); TF_EXPECT_OK(ParseTransformParameters("bar(a=\"1\", b=\"1,2,3\", a=3)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("bar", params_list[0].first); EXPECT_EQ(1, params_list[0].second.count("a")); EXPECT_EQ(2, params_list[0].second["a"].size()); EXPECT_EQ("1", params_list[0].second["a"][0]); EXPECT_EQ("3", params_list[0].second["a"][1]); EXPECT_EQ(1, params_list[0].second.count("b")); EXPECT_EQ(1, params_list[0].second["b"].size()); EXPECT_EQ("1,2,3", params_list[0].second["b"][0]); } void TestParseEscapedNewline() { TransformParameters params_list; ParseTransformParameters("\\\n", &params_list).IgnoreError(); EXPECT_EQ(0, params_list.size()); } void TestParseExtraSpaces() { TransformParameters params_list; ParseTransformParameters(" ", &params_list).IgnoreError(); EXPECT_EQ(0, params_list.size()); TF_EXPECT_OK(ParseTransformParameters(" foo bar \\\n", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); } void TestShouldIgnoreErrors() { bool ignore_errors; TF_EXPECT_OK( ShouldIgnoreErrors({{"ignore_errors", {"true"}}}, &ignore_errors)); EXPECT_TRUE(ignore_errors); TF_EXPECT_OK( ShouldIgnoreErrors({{"ignore_errors", {"false"}}}, 
&ignore_errors)); EXPECT_FALSE(ignore_errors); TF_EXPECT_OK(ShouldIgnoreErrors({}, &ignore_errors)); EXPECT_FALSE(ignore_errors); EXPECT_FALSE( ShouldIgnoreErrors({{"ignore_errors", {"foo"}}}, &ignore_errors).ok()); } }; TEST_F(TransformGraphTest, TestConstantFolding) { TestConstantFolding(); } TEST_F(TransformGraphTest, TestTransformRegistration) { TestTransformRegistration(); } TEST_F(TransformGraphTest, TestParseTransformParameters) { TestParseTransformParameters(); } TEST_F(TransformGraphTest, TestParseEscapedNewline) { TestParseEscapedNewline(); } TEST_F(TransformGraphTest, TestShouldIgnoreErrors) { TestShouldIgnoreErrors(); } } }
Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params,
                          bool* ignore_errors) {
  *ignore_errors = false;
  if (transform_params.count("ignore_errors") &&
      (!transform_params.at("ignore_errors").empty())) {
    const string& ignore_errors_string =
        absl::AsciiStrToLower(transform_params.at("ignore_errors").at(0));
    if (ignore_errors_string == "true") {
      *ignore_errors = true;
    } else if (ignore_errors_string == "false") {
      *ignore_errors = false;
    } else {
      return errors::InvalidArgument(
          "ignore_errors should be true or false, found ",
          ignore_errors_string);
    }
  }
  return OkStatus();
}
TEST_F(TransformGraphTest, TestShouldIgnoreErrors) { TestShouldIgnoreErrors(); }
#include "tensorflow/compiler/tf2xla/kernels/while_op.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/container/inlined_vector.h" #include "absl/log/log.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/tf2xla/kernels/if_while_utils.h" #include "tensorflow/compiler/tf2xla/kernels/tensor_list_utils.h" #include "tensorflow/compiler/tf2xla/side_effect_util.h" #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/compiler/tf2xla/xla_resource.h" #include "xla/client/client.h" #include "xla/client/lib/tuple.h" #include "xla/client/xla_builder.h" #include "xla/client/xla_computation.h" #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/types.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { Status VerifyResourceArgsGroupedAtEnd(XlaOpKernelContext* ctx, const NameAttrList& body_name_attr) { const FunctionBody* body; TF_RETURN_IF_ERROR(ctx->compiler()->FindFunctionBody(body_name_attr, &body)); bool has_seen_resource = false; for (int i = 0; i < body->arg_types.size(); i++) { DataType arg_type = body->arg_types[i]; if (has_seen_resource) { if (arg_type != DT_RESOURCE) { return errors::InvalidArgument( "Expect input resources are grouped in the end of while body ", body_name_attr.name(), ", but the ", i, "-th argument ", body->arg_nodes[i]->name(), " is not a resource."); } } else { if (arg_type == DT_RESOURCE) { has_seen_resource = true; } } } return absl::OkStatus(); } Status MakeXlaCompilerArgumentsFromInputs( XlaOpKernelContext* ctx, std::vector<XlaCompiler::Argument>* args, bool* has_uninitialized_vars, bool* has_tensor_arrays, bool* has_uninitialized_tensor_lists) { VLOG(2) << "Num inputs " << ctx->num_inputs(); args->resize(ctx->num_inputs()); *has_uninitialized_vars = false; *has_tensor_arrays = false; *has_uninitialized_tensor_lists = false; for (int i = 0; i < ctx->num_inputs(); ++i) { VLOG(2) << " Input " << i << " type: " << DataTypeString(ctx->input_type(i)) << " shape: " << ctx->InputShape(i).DebugString(); XlaCompiler::Argument& arg = (*args)[i]; DataType type = ctx->input_type(i); if (type == DT_RESOURCE) { XlaResource* resource; TF_RETURN_IF_ERROR(ctx->GetResourceInput(i, &resource)); XlaCompiler::PopulateArgumentFromResource(*resource, &arg); if (arg.resource_kind == XlaResource::kTensorArray) { *has_tensor_arrays = true; } if (!arg.initialized) { *has_uninitialized_vars = true; } VLOG(2) << " resource " << resource->name() << " type: " << DataTypeString(arg.type) << " shape: " << arg.ShapeHumanString() << " initialized: " << arg.initialized; } else { arg.kind = XlaCompiler::Argument::kParameter; arg.type = type; TF_ASSIGN_OR_RETURN(arg.shape, ctx->builder()->GetShape(ctx->Input(i))); if (IsTensorListInput(ctx, i)) { TF_RETURN_IF_ERROR( 
IsTensorListInitialized(ctx->Input(i), &arg.initialized)); if (!arg.initialized) { *has_uninitialized_tensor_lists = true; } } } } return absl::OkStatus(); } void GetLoopInvariants(XlaOpKernelContext* ctx, const NameAttrList& body_name_attr, std::vector<bool>* const loop_invariants) { const FunctionBody* body; OP_REQUIRES_OK(ctx, ctx->compiler()->FindFunctionBody(body_name_attr, &body)); const tensorflow::FunctionLibraryDefinition* fld = ctx->compiler()->flib_runtime()->GetFunctionLibraryDefinition(); for (int i = 0; i < body->ret_nodes.size(); i++) { absl::StatusOr<bool> is_loop_invariant = IsLoopInvariant(body, i, fld); OP_REQUIRES_OK(ctx, is_loop_invariant.status()); (*loop_invariants)[i] = *is_loop_invariant; VLOG(2) << "Arg " << i << " of " << body_name_attr.name() << " is " << ((*loop_invariants)[i] ? "" : "not ") << "loop invariant"; } } Status ConvertLoopInvariantsToConst( XlaOpKernelContext* ctx, const NameAttrList& body_name_attr, const NameAttrList& cond_name_attr, std::vector<XlaCompiler::Argument>* args, std::vector<bool>* compile_time_const_arg_indices, int* num_compile_time_const_args, xla::Client* client) { std::vector<bool> loop_invariants(ctx->num_inputs()); GetLoopInvariants(ctx, body_name_attr, &loop_invariants); std::vector<bool> body_must_be_const_nodes; const FunctionBody* body; std::vector<bool> cond_must_be_const_nodes; const FunctionBody* cond; TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, body_name_attr, &body_must_be_const_nodes, &body)); TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, cond_name_attr, &cond_must_be_const_nodes, &cond)); auto should_convert_to_const = [&](int arg_idx) { XlaCompiler::Argument& arg = (*args)[arg_idx]; return arg.kind != XlaCompiler::Argument::kResource && loop_invariants[arg_idx] && (body_must_be_const_nodes[body->arg_nodes[arg_idx]->id()] || cond_must_be_const_nodes[cond->arg_nodes[arg_idx]->id()]); }; absl::InlinedVector<int, 5> converted_constants = ConvertCompileTimeConstArgumentsToConst(ctx, args, 0, should_convert_to_const); VLOG(2) << "Converted args to constants: {" << absl::StrJoin(converted_constants, ",") << "}"; for (int arg_idx : converted_constants) { compile_time_const_arg_indices->at(arg_idx) = true; (*num_compile_time_const_args)++; } return absl::OkStatus(); } Status VerifyBodyInputAndOutputShapeMatch( XlaOpKernelContext* ctx, const std::vector<bool>& compile_time_const_arg_indices, const XlaCompiler::CompilationResult& body, bool has_token_input_output) { xla::Shape body_input_shape = body.xla_input_shapes[0]; xla::Shape body_output_shape; body_output_shape.set_element_type(xla::TUPLE); for (int i = 0; i < ctx->num_outputs(); i++) { if (!compile_time_const_arg_indices[i]) { *(body_output_shape.add_tuple_shapes()) = body.xla_output_shape.tuple_shapes(i); } } if (has_token_input_output) { *(body_output_shape.add_tuple_shapes()) = body.xla_output_shape.tuple_shapes(ctx->num_inputs()); } if (!xla::ShapeUtil::Compatible(body_input_shape, body_output_shape)) { return errors::InvalidArgument( "Input and output shapes of loop body do not match: ", xla::ShapeUtil::HumanString(body_input_shape), " vs. 
", xla::ShapeUtil::HumanString(body_output_shape)); } return absl::OkStatus(); } absl::StatusOr<xla::XlaComputation> BuildWrappedCond( XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& cond) { xla::Shape cond_input_shape = cond.xla_input_shapes[0]; std::unique_ptr<xla::XlaBuilder> cb = ctx->builder()->CreateSubBuilder("cond_wrapper"); auto inputs = xla::Parameter(cb.get(), 0, cond_input_shape, "inputs"); auto outputs = xla::Call(cb.get(), *cond.computation, {inputs}); xla::GetTupleElement(outputs, 0); return cb->Build(); } absl::StatusOr<xla::XlaComputation> BuildWrappedBody( XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& body, const std::vector<bool>& compile_time_const_arg_indices, int num_compile_time_const_args, bool has_token_input_output) { if (num_compile_time_const_args <= 0 && body.xla_input_shapes[0] == body.xla_output_shape) { return xla::XlaComputation(body.computation->proto()); } xla::XlaComputation body_wrapper; std::unique_ptr<xla::XlaBuilder> cb = ctx->builder()->CreateSubBuilder("body_wrapper"); xla::Shape body_input_shape = body.xla_input_shapes[0]; auto inputs = xla::Parameter(cb.get(), 0, body_input_shape, "inputs"); auto outputs = xla::Call(cb.get(), *body.computation, {inputs}); std::vector<xla::XlaOp> non_compile_time_const_outputs; int input_num = 0; for (int i = 0; i < compile_time_const_arg_indices.size(); i++) { if (!compile_time_const_arg_indices[i]) { xla::XlaOp output = xla::GetTupleElement(outputs, i); const xla::Shape& input_shape = body_input_shape.tuple_shapes(input_num); const xla::Shape& output_shape = body.xla_output_shape.tuple_shapes(i); TF_RET_CHECK(xla::ShapeUtil::Compatible(input_shape, output_shape)); if (input_shape != output_shape) { TF_ASSIGN_OR_RETURN(xla::ShapeTree<xla::XlaOp> disassembled_tuple, xla::DisassembleTuple(output)); disassembled_tuple.ForEachMutableElement( [&](const xla::ShapeIndex& index, xla::XlaOp* element) { const xla::Shape& output_subshape = xla::ShapeUtil::GetSubshape(output_shape, index); if (output_subshape.IsArray()) { const xla::Shape& input_subshape = xla::ShapeUtil::GetSubshape(input_shape, index); for (int d = 0; d < output_subshape.rank(); ++d) { if (input_subshape.is_dynamic_dimension(d) && !output_subshape.is_dynamic_dimension(d)) { *element = xla::SetDimensionSize( *element, xla::ConstantR0( cb.get(), static_cast<int32_t>(output_shape.dimensions()[d])), d); } } } }); output = xla::AssembleTuple(output.builder(), std::move(disassembled_tuple)); } non_compile_time_const_outputs.push_back(output); ++input_num; } } if (has_token_input_output) { non_compile_time_const_outputs.push_back( xla::GetTupleElement(outputs, ctx->num_outputs())); } xla::Tuple(cb.get(), non_compile_time_const_outputs); return cb->Build(); } xla::XlaOp BuildWhile(XlaOpKernelContext* ctx, const xla::XlaComputation& wrapped_cond, const xla::XlaComputation& wrapped_body, const xla::XlaOp initial_values, const std::vector<int>& input_mapping, const std::vector<bool>& compile_time_const_arg_indices, int num_compile_time_const_args, bool has_token_input_output) { xla::XlaOp while_result = xla::While(wrapped_cond, wrapped_body, initial_values); std::vector<xla::XlaOp> padded_while_outputs(ctx->num_outputs()); int while_result_index = 0; for (int i = 0; i < ctx->num_inputs(); i++) { if (!compile_time_const_arg_indices[i]) { padded_while_outputs[input_mapping[while_result_index]] = xla::GetTupleElement(while_result, while_result_index); while_result_index++; } else { padded_while_outputs[i] = ctx->Input(i); } } if 
(has_token_input_output) { padded_while_outputs.push_back(xla::GetTupleElement( while_result, ctx->num_inputs() - num_compile_time_const_args)); } return xla::Tuple(ctx->builder(), padded_while_outputs); } } XlaWhileOp::XlaWhileOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { const NameAttrList* name_attr; OP_REQUIRES_OK(ctx, ctx->GetAttr("cond", &name_attr)); cond_name_attr_ = *name_attr; OP_REQUIRES_OK(ctx, ctx->GetAttr("body", &name_attr)); body_name_attr_ = *name_attr; if (!ctx->GetAttr(kXlaTokenInputNodesAttrName, &token_input_nodes_).ok()) { has_token_input_output_ = false; } else { has_token_input_output_ = !token_input_nodes_.empty(); } if (ctx->HasAttr(kPropagateCompileTimeConsts)) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kPropagateCompileTimeConsts, &propagate_compile_time_consts_)); } if (!ctx->GetAttr(kXlaOriginalOutsideCompilationNodeName, &original_node_name_) .ok()) original_node_name_ = name(); } void XlaWhileOp::Compile(XlaOpKernelContext* ctx) { VLOG(1) << "WhileOp::Compile"; OP_REQUIRES_OK(ctx, VerifyResourceArgsGroupedAtEnd(ctx, body_name_attr_)); std::vector<XlaCompiler::Argument> arguments; bool has_uninitialized_vars; bool has_tensor_arrays; bool has_uninitialized_tensor_lists; OP_REQUIRES_OK(ctx, MakeXlaCompilerArgumentsFromInputs( ctx, &arguments, &has_uninitialized_vars, &has_tensor_arrays, &has_uninitialized_tensor_lists)); xla::XlaBuilder* builder = ctx->builder(); XlaCompiler* compiler = ctx->compiler(); std::vector<bool> compile_time_const_arg_indices(ctx->num_inputs()); int num_compile_time_const_args = 0; if (propagate_compile_time_consts_) { OP_REQUIRES_OK(ctx, ConvertLoopInvariantsToConst( ctx, body_name_attr_, cond_name_attr_, &arguments, &compile_time_const_arg_indices, &num_compile_time_const_args, compiler->client())); } VLOG(1) << "Compiling body"; XlaCompiler::CompileOptions body_options; body_options.use_tuple_arg = true; body_options.return_updated_values_for_all_resources = true; body_options.is_entry_computation = false; body_options.add_token_input_output = has_token_input_output_; auto body = std::make_unique<XlaCompiler::CompilationResult>(); OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_, arguments, body.get())); OP_REQUIRES_OK( ctx, ctx->xla_context()->RecordCollectiveInfoFromNestedCompilationResult( *body.get())); if (has_uninitialized_vars || has_tensor_arrays || has_uninitialized_tensor_lists) { VLOG(2) << "Recompiling loop body: has_uninitialized_vars: " << has_uninitialized_vars << " has_tensor_arrays: " << has_tensor_arrays << " has_uninitialized_tensor_lists: " << has_uninitialized_tensor_lists; for (int i = 0; i < body->resource_updates.size(); ++i) { const XlaCompiler::ResourceUpdate& update = body->resource_updates[i]; XlaResource* resource; OP_REQUIRES_OK(ctx, ctx->GetResourceInput(update.input_index, &resource)); XlaCompiler::Argument& arg = arguments[update.input_index]; if (!arg.initialized) { VLOG(2) << "Update shape for argument " << update.input_index << " " << update.shape.DebugString(); arg.initialized = true; arg.shape = update.shape; OP_REQUIRES_OK(ctx, resource->SetTypeAndShape(update.type, update.shape)); OP_REQUIRES_OK(ctx, resource->SetZeroValue(builder)); } for (const string& grad_source : update.tensor_array_gradients_accessed) { VLOG(4) << "TensorArray " << resource->name() << " accessed gradient " << grad_source; XlaResource* gradient; OP_REQUIRES_OK(ctx, resource->GetOrCreateTensorArrayGradient( grad_source, builder, &gradient)); } for (const auto& gradient : 
resource->tensor_array_gradients()) { arg.tensor_array_gradients.insert(gradient.first); } } xla::Shape body_output_shape = body->xla_output_shape; OP_REQUIRES(ctx, body_output_shape.IsTuple(), errors::FailedPrecondition( "xla_output_shape of while body must be a tuple.")); for (int i = 0; i < arguments.size(); i++) { XlaCompiler::Argument& arg = arguments[i]; if (arg.initialized || !IsTensorListInput(ctx, i)) { continue; } arg.shape = body_output_shape.tuple_shapes(i); arg.initialized = true; } VLOG(1) << "Recompiling body with corrected resource shapes"; *body = {}; OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_, arguments, body.get())); } VLOG(1) << "Compiling condition"; XlaCompiler::CompileOptions cond_options; cond_options.use_tuple_arg = true; cond_options.is_entry_computation = false; cond_options.add_token_input_output = has_token_input_output_; XlaCompiler::CompilationResult cond; OP_REQUIRES_OK(ctx, compiler->CompileFunction(cond_options, cond_name_attr_, arguments, &cond)); OP_REQUIRES(ctx, body->xla_input_shapes.size() == 1, errors::FailedPrecondition("Expected one input shape")); xla::Shape body_input_shape = body->xla_input_shapes[0]; OP_REQUIRES(ctx, body_input_shape.IsTuple(), errors::FailedPrecondition("Expected tuple shape")); OP_REQUIRES(ctx, cond.xla_input_shapes.size() == 1, errors::FailedPrecondition("Expected one input shape")); xla::Shape cond_input_shape = cond.xla_input_shapes[0]; OP_REQUIRES(ctx, cond_input_shape.IsTuple(), errors::FailedPrecondition("Expected tuple shape")); VLOG(2) << "Body shape: " << xla::ShapeUtil::HumanString(body_input_shape) << " -> " << xla::ShapeUtil::HumanString(body->xla_output_shape); VLOG(2) << "Cond shape: " << xla::ShapeUtil::HumanString(cond_input_shape) << " -> " << xla::ShapeUtil::HumanString(cond.xla_output_shape); OP_REQUIRES(ctx, xla::ShapeUtil::Compatible(body_input_shape, cond_input_shape), errors::InvalidArgument( "Input shapes of loop body and condition do not match: ", xla::ShapeUtil::HumanString(body_input_shape), " vs. 
", xla::ShapeUtil::HumanString(cond_input_shape))); OP_REQUIRES_OK(ctx, VerifyBodyInputAndOutputShapeMatch( ctx, compile_time_const_arg_indices, *body.get(), has_token_input_output_)); xla::Shape expected_cond_output_shape_without_side_effect = xla::ShapeUtil::MakeTupleShape( {xla::ShapeUtil::MakeShape(xla::PRED, {})}); xla::Shape expected_cond_output_shape_with_side_effect = xla::ShapeUtil::MakeTupleShape({xla::ShapeUtil::MakeShape(xla::PRED, {}), xla::ShapeUtil::MakeTokenShape()}); OP_REQUIRES(ctx, xla::ShapeUtil::Compatible( cond.xla_output_shape, expected_cond_output_shape_without_side_effect) || xla::ShapeUtil::Compatible( cond.xla_output_shape, expected_cond_output_shape_with_side_effect), errors::InvalidArgument( "Output shape of loop condition should be (pred[]) or " "(pred[], token[]), got: ", xla::ShapeUtil::HumanString(cond.xla_output_shape))); int num_inputs = body->input_mapping.size(); std::vector<xla::XlaOp> inputs(num_inputs); for (int i = 0; i < num_inputs; ++i) { int input_num = body->input_mapping[i]; if (has_token_input_output_ && i == num_inputs - 1) { std::vector<xla::XlaOp> token_inputs; token_inputs.reserve(token_input_nodes_.size()); for (const string& node_name : token_input_nodes_) { auto token_or = compiler->GetNodeToken(node_name); OP_REQUIRES_OK(ctx, token_or.status()); token_inputs.push_back(token_or.value()); } inputs[i] = xla::AfterAll(builder, token_inputs); } else if (ctx->input_type(input_num) == DT_RESOURCE) { XlaResource* resource; OP_REQUIRES_OK(ctx, ctx->GetResourceInput(input_num, &resource)); OP_REQUIRES_OK(ctx, resource->Pack(&inputs[i], builder)); } else if (IsTensorListInput(ctx, input_num)) { xla::XlaOp input = ctx->Input(input_num); auto input_shape_or = ctx->builder()->GetShape(input); OP_REQUIRES_OK(ctx, input_shape_or.status()); xla::Shape input_shape = input_shape_or.value(); const xla::Shape& list_shape = body_input_shape.tuple_shapes(i); if (input_shape != list_shape) { std::vector<std::vector<xla::XlaOp>> list_dynamic_dims; for (int i = 0; i < list_shape.tuple_shapes_size() - 1; ++i) { std::vector<xla::XlaOp> dynamic_dims; const xla::Shape& shape = list_shape.tuple_shapes(i); if (shape.is_dynamic_dimension(0)) { xla::XlaOp leading_dim_size = xla::GetDimensionSize(input, 0); dynamic_dims.push_back(leading_dim_size); } else { int32_t dim_size = shape.dimensions(0); dynamic_dims.push_back( xla::ConstantR0<int32>(ctx->builder(), dim_size)); } for (int64_t dim = 1; dim < shape.dimensions_size(); ++dim) { int32_t dim_size = shape.dimensions(dim); if (shape.is_dynamic_dimension(dim)) { dim_size = 0; } dynamic_dims.push_back( xla::ConstantR0<int32_t>(ctx->builder(), dim_size)); } list_dynamic_dims.push_back(dynamic_dims); } OP_REQUIRES_OK( ctx, CreateZerosTensorListWithShape(ctx->builder(), list_shape, list_dynamic_dims, &inputs[i])); } else { inputs[i] = ctx->Input(input_num); } } else { inputs[i] = ctx->Input(input_num); } } xla::XlaOp init = xla::Tuple(builder, inputs); VLOG(1) << "Building while loop"; absl::StatusOr<xla::XlaComputation> cond_result = BuildWrappedCond(ctx, cond); OP_REQUIRES_OK(ctx, cond_result.status()); xla::XlaComputation wrapped_cond = std::move(cond_result.value()); absl::StatusOr<xla::XlaComputation> body_result = BuildWrappedBody(ctx, *body.get(), compile_time_const_arg_indices, num_compile_time_const_args, has_token_input_output_); OP_REQUIRES_OK(ctx, body_result.status()); xla::XlaComputation wrapped_body = std::move(body_result.value()); xla::XlaOp while_result = BuildWhile(ctx, wrapped_cond, wrapped_body, init, 
body->input_mapping, compile_time_const_arg_indices, num_compile_time_const_args, has_token_input_output_); int resource_index = 0; for (int i = 0; i < ctx->num_outputs(); ++i) { if (ctx->input_type(i) != DT_RESOURCE) { if (IsTensorListInput(ctx, i)) { ctx->SetTensorListOutput(i, xla::GetTupleElement(while_result, i)); } else { ctx->SetOutput(i, xla::GetTupleElement(while_result, i)); } ++resource_index; } else { break; } } if (has_token_input_output_) { xla::XlaOp token_output = xla::GetTupleElement(while_result, ctx->num_outputs()); auto shape_or = builder->GetShape(token_output); OP_REQUIRES_OK(ctx, shape_or.status()); OP_REQUIRES(ctx, shape_or.value().IsToken(), errors::FailedPrecondition( "Token output is not token type: ", xla::ShapeUtil::HumanString(shape_or.value()))); OP_REQUIRES_OK(ctx, compiler->SetNodeToken(original_node_name_, token_output)); } for (int i = 0; i < body->resource_updates.size(); ++i) { const XlaCompiler::ResourceUpdate& update = body->resource_updates[i]; XlaResource* resource; OP_REQUIRES_OK(ctx, ctx->GetResourceInput(update.input_index, &resource)); if (update.modified) { int pos = resource_index + i; OP_REQUIRES_OK(ctx, resource->SetFromPack( arguments[update.input_index].tensor_array_gradients, xla::GetTupleElement(while_result, pos), builder)); } VLOG(2) << "Loop-carried variable: pos: " << update.input_index << " name: " << resource->name() << " modified: " << update.modified << " type: " << DataTypeString(update.type) << " shape: " << update.shape.DebugString(); ctx->op_kernel_context()->set_output( update.input_index, ctx->op_kernel_context()->input(update.input_index)); } VLOG(1) << "Done building while loop"; } REGISTER_XLA_OP(Name("While").AllowResourceTypes().AllowVariantTypes(), XlaWhileOp); REGISTER_XLA_OP(Name("StatelessWhile").AllowResourceTypes().AllowVariantTypes(), XlaWhileOp); REGISTER_XLA_OP(Name("XlaWhile").AllowResourceTypes().AllowVariantTypes(), XlaWhileOp); }
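The wrapped cond must yield a scalar PRED and the wrapped body must map its input tuple type back to itself; that is the contract xla::While enforces and that VerifyBodyInputAndOutputShapeMatch checks above. A minimal sketch of the same contract against the public XlaBuilder API (illustrative only, not taken from this file; error handling elided):

#include <cstdint>
#include "xla/client/xla_builder.h"
#include "xla/shape_util.h"

// Builds while (i < 10) { i = i + 1; } starting from i = 0. The condition
// returns a scalar PRED; the body maps S32[] -> S32[], so the loop-carried
// input and output shapes match, as required.
xla::XlaOp CounterLoop(xla::XlaBuilder* b) {
  const xla::Shape s32 = xla::ShapeUtil::MakeShape(xla::S32, {});
  auto cond_b = b->CreateSubBuilder("cond");
  xla::Lt(xla::Parameter(cond_b.get(), 0, s32, "i"),
          xla::ConstantR0<int32_t>(cond_b.get(), 10));
  xla::XlaComputation cond = cond_b->Build().value();
  auto body_b = b->CreateSubBuilder("body");
  xla::Add(xla::Parameter(body_b.get(), 0, s32, "i"),
           xla::ConstantR0<int32_t>(body_b.get(), 1));
  xla::XlaComputation body = body_b->Build().value();
  return xla::While(cond, body, xla::ConstantR0<int32_t>(b, 0));
}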
#include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h" #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/standard_ops.h" #include "xla/stream_executor/event.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" namespace tensorflow { namespace { class WhileOpTest : public OpsTestBase { protected: WhileOpTest() {} void SetUp() override { stream_executor::test_util::PopulateDefaultPlatform(&platform_, &platform_fns_); stream_executor::test_util::PopulateDefaultDeviceFns(&device_fns_); stream_executor::test_util::PopulateDefaultStreamExecutor(&se_); stream_executor::test_util::PopulateDefaultTimerFns(&timer_fns_); } void TearDown() override {} SP_Platform platform_; SP_PlatformFns platform_fns_; SP_DeviceFns device_fns_; SP_StreamExecutor se_; SP_TimerFns timer_fns_; }; FunctionDef LessThanOrEqualToNWithCast(int64_t N) { typedef FunctionDefHelper FDH; const Tensor kN = test::AsScalar<int64_t>(N); return FDH::Define( "LessThanOrEqualToNWithCast", {"x: T"}, {"z: bool"}, {"T: {float, double, int32, int64}"}, { {{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT64}}}, {{"y"}, "_HostCast", {"N"}, {{"SrcT", DT_INT64}, {"DstT", DT_INT32}}}, {{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}}, {{"z"}, "LessEqual", {"x_cst", "y"}, {{"T", DT_INT32}}}, }); } FunctionDef XTimesTwoWithCast() { typedef FunctionDefHelper FDH; const Tensor kTwo = test::AsScalar<int64_t>(2); return FDH::Define( "XTimesTwoWithCast", {"x: T"}, {"y: T"}, {"T: {float, double, int32, int64}"}, { {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}}, {{"two_cst"}, "_HostCast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", DT_INT32}}}, {{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}}, {{"y_cast"}, "Mul", {"x_cst", "two_cst"}, {{"T", DT_INT32}}}, {{"y"}, "_HostCast", {"y_cast"}, {{"SrcT", DT_INT32}, {"DstT", "$T"}}}, }); } TEST_F(WhileOpTest, WhileOpCPUBuildWithPluggableDevice) { const std::string platform_name = "MY_TEST"; const std::string platform_type = "FAKE"; platform_.name = platform_name.c_str(); platform_.type = platform_type.c_str(); static bool memcpy_d2h_called = false; se_.memcpy_dtoh = [](const SP_Device* device, SP_Stream stream, void* host_dst, const SP_DeviceMemoryBase* device_src, uint64_t size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); memcpy_d2h_called = true; std::memcpy(host_dst, device_src->opaque, size); }; se_.memcpy_htod = [](const SP_Device* const device, SP_Stream stream, SP_DeviceMemoryBase* const device_dst, const void* host_src, uint64_t size, TF_Status* const status) { TF_SetStatus(status, TF_OK, ""); std::memcpy(device_dst->opaque, host_src, size); }; se_.host_memory_allocate = [](const SP_Device* const device, uint64_t size) { #if EIGEN_MAX_ALIGN_BYTES == 0 return malloc(size); #else return tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES); #endif }; se_.host_memory_deallocate = [](const SP_Device* const device, void* mem) { free(mem); }; se_.allocate = [](const SP_Device* const device, uint64_t size, int64_t memory_space, SP_DeviceMemoryBase* const mem) { 
mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE; #if EIGEN_MAX_ALIGN_BYTES == 0 mem->opaque = malloc(size); #else mem->opaque = tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES); #endif mem->size = size; }; se_.deallocate = [](const SP_Device* const device, SP_DeviceMemoryBase* const mem) { free(mem->opaque); mem->opaque = nullptr; mem->size = 0; }; static SE_EventStatus event_status = SE_EVENT_COMPLETE; se_.create_event = [](const SP_Device* const device, SP_Event* event, TF_Status* const status) -> void { *event = new SP_Event_st(666); }; se_.destroy_event = [](const SP_Device* const device, SP_Event event) -> void { delete event; }; se_.get_event_status = [](const SP_Device* const device, SP_Event event) -> SE_EventStatus { EXPECT_EQ(event->event_id, 666); return event_status; }; std::unique_ptr<stream_executor::CPlatform> cplatform( new stream_executor::CPlatform( std::move(platform_), stream_executor::test_util::DestroyPlatform, std::move(platform_fns_), stream_executor::test_util::DestroyPlatformFns, std::move(device_fns_), std::move(se_), std::move(timer_fns_))); TF_CHECK_OK( stream_executor::PlatformManager::RegisterPlatform(std::move(cplatform))); DeviceFactory::Register( platform_type, new PluggableDeviceFactory(platform_type, platform_name), 220, true); std::unique_ptr<Device> plug_device( DeviceFactory::NewDevice(platform_type, {}, "/job:a/replica:0")); OpsTestBase::SetDevice(platform_type.c_str(), std::move(plug_device)); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDef x_times_two = XTimesTwoWithCast(); FunctionDef less_than_or_eq = LessThanOrEqualToNWithCast(8); FunctionDefLibrary f_lib_proto; *f_lib_proto.add_function() = x_times_two; *f_lib_proto.add_function() = less_than_or_eq; TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto)); auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT); AttrValue cond_func; cond_func.mutable_func()->set_name("LessThanOrEqualToNWithCast"); (*cond_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT); AttrValue body_func; body_func.mutable_func()->set_name("XTimesTwoWithCast"); (*body_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT); std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())}); Node* node; TF_EXPECT_OK(NodeBuilder("while_test", "While", &root.graph()->flib_def()) .Input(inputs) .Attr("T", {DT_FLOAT}) .Attr("cond", cond_func) .Attr("body", body_func) .Attr("parallel_iterations", 100) .Finalize(root.graph(), &node)); auto c = ops::Identity( root.WithOpName("C").WithControlDependencies(Output(node)), Output(node)); TF_ASSERT_OK(root.DoShapeInference(node)); TF_ASSERT_OK(root.ToGraph(graph.get())); ClientSession session(root); { ClientSession::FeedType feeds; feeds.emplace(Output(a.node()), Input::Initializer(1.f)); std::vector<Tensor> out_tensors; TF_ASSERT_OK(session.Run(feeds, {Output(c.node())}, &out_tensors)); ASSERT_EQ(memcpy_d2h_called, true); ASSERT_EQ(out_tensors.size(), 1); EXPECT_EQ(out_tensors[0].scalar<float>()(), 16.f); } } } }
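The _HostCast nodes in the two FunctionDefs above pin the comparison and multiplication to host-resident int32 kernels, so the fake StreamExecutor platform only has to supply memory transfer. For contrast, the conventional form of such a function without host casts looks like this (a sketch in the same FunctionDefHelper style, mirroring the standard XTimesTwo helper):

FunctionDef XTimesTwo() {
  typedef FunctionDefHelper FDH;
  const Tensor kTwo = test::AsScalar<int64_t>(2);
  return FDH::Define(
      // Name, arg defs, return defs, attr defs, node defs.
      "XTimesTwo", {"x: T"}, {"y: T"}, {"T: {float, double, int32, int64}"},
      {
          {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
          {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
          {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
      });
}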
void GetLoopInvariants(XlaOpKernelContext* ctx, const NameAttrList& body_name_attr, std::vector<bool>* const loop_invariants) { const FunctionBody* body; OP_REQUIRES_OK(ctx, ctx->compiler()->FindFunctionBody(body_name_attr, &body)); const tensorflow::FunctionLibraryDefinition* fld = ctx->compiler()->flib_runtime()->GetFunctionLibraryDefinition(); for (int i = 0; i < body->ret_nodes.size(); i++) { absl::StatusOr<bool> is_loop_invariant = IsLoopInvariant(body, i, fld); OP_REQUIRES_OK(ctx, is_loop_invariant.status()); (*loop_invariants)[i] = *is_loop_invariant; VLOG(2) << "Arg " << i << " of " << body_name_attr.name() << " is " << ((*loop_invariants)[i] ? "" : "not ") << "loop invariant"; } }
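IsLoopInvariant (from if_while_utils) reports whether return value i of the body is a pass-through of argument i. A simplified sketch of that check, under the assumption that the return node is wired straight to the argument (the real helper also follows Identity chains); the function name here is hypothetical:

// Return value i is trivially loop-invariant when its _Retval node reads
// directly from _Arg node i of the same FunctionBody.
bool RetIsPassThroughArg(const FunctionBody* body, int i) {
  const Node* ret = body->ret_nodes[i];
  const Node* in = nullptr;
  TF_CHECK_OK(ret->input_node(0, &in));
  return in->IsArg() && in == body->arg_nodes[i];
}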
TEST_F(WhileOpTest, WhileOpCPUBuildWithPluggableDevice) { const std::string platform_name = "MY_TEST"; const std::string platform_type = "FAKE"; platform_.name = platform_name.c_str(); platform_.type = platform_type.c_str(); static bool memcpy_d2h_called = false; se_.memcpy_dtoh = [](const SP_Device* device, SP_Stream stream, void* host_dst, const SP_DeviceMemoryBase* device_src, uint64_t size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); memcpy_d2h_called = true; std::memcpy(host_dst, device_src->opaque, size); }; se_.memcpy_htod = [](const SP_Device* const device, SP_Stream stream, SP_DeviceMemoryBase* const device_dst, const void* host_src, uint64_t size, TF_Status* const status) { TF_SetStatus(status, TF_OK, ""); std::memcpy(device_dst->opaque, host_src, size); }; se_.host_memory_allocate = [](const SP_Device* const device, uint64_t size) { #if EIGEN_MAX_ALIGN_BYTES == 0 return malloc(size); #else return tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES); #endif }; se_.host_memory_deallocate = [](const SP_Device* const device, void* mem) { free(mem); }; se_.allocate = [](const SP_Device* const device, uint64_t size, int64_t memory_space, SP_DeviceMemoryBase* const mem) { mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE; #if EIGEN_MAX_ALIGN_BYTES == 0 mem->opaque = malloc(size); #else mem->opaque = tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES); #endif mem->size = size; }; se_.deallocate = [](const SP_Device* const device, SP_DeviceMemoryBase* const mem) { free(mem->opaque); mem->opaque = nullptr; mem->size = 0; }; static SE_EventStatus event_status = SE_EVENT_COMPLETE; se_.create_event = [](const SP_Device* const device, SP_Event* event, TF_Status* const status) -> void { *event = new SP_Event_st(666); }; se_.destroy_event = [](const SP_Device* const device, SP_Event event) -> void { delete event; }; se_.get_event_status = [](const SP_Device* const device, SP_Event event) -> SE_EventStatus { EXPECT_EQ(event->event_id, 666); return event_status; }; std::unique_ptr<stream_executor::CPlatform> cplatform( new stream_executor::CPlatform( std::move(platform_), stream_executor::test_util::DestroyPlatform, std::move(platform_fns_), stream_executor::test_util::DestroyPlatformFns, std::move(device_fns_), std::move(se_), std::move(timer_fns_))); TF_CHECK_OK( stream_executor::PlatformManager::RegisterPlatform(std::move(cplatform))); DeviceFactory::Register( platform_type, new PluggableDeviceFactory(platform_type, platform_name), 220, true); std::unique_ptr<Device> plug_device( DeviceFactory::NewDevice(platform_type, {}, "/job:a/replica:0")); OpsTestBase::SetDevice(platform_type.c_str(), std::move(plug_device)); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDef x_times_two = XTimesTwoWithCast(); FunctionDef less_than_or_eq = LessThanOrEqualToNWithCast(8); FunctionDefLibrary f_lib_proto; *f_lib_proto.add_function() = x_times_two; *f_lib_proto.add_function() = less_than_or_eq; TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto)); auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT); AttrValue cond_func; cond_func.mutable_func()->set_name("LessThanOrEqualToNWithCast"); (*cond_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT); AttrValue body_func; body_func.mutable_func()->set_name("XTimesTwoWithCast"); (*body_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT); std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())}); Node* node; 
TF_EXPECT_OK(NodeBuilder("while_test", "While", &root.graph()->flib_def()) .Input(inputs) .Attr("T", {DT_FLOAT}) .Attr("cond", cond_func) .Attr("body", body_func) .Attr("parallel_iterations", 100) .Finalize(root.graph(), &node)); auto c = ops::Identity( root.WithOpName("C").WithControlDependencies(Output(node)), Output(node)); TF_ASSERT_OK(root.DoShapeInference(node)); TF_ASSERT_OK(root.ToGraph(graph.get())); ClientSession session(root); { ClientSession::FeedType feeds; feeds.emplace(Output(a.node()), Input::Initializer(1.f)); std::vector<Tensor> out_tensors; TF_ASSERT_OK(session.Run(feeds, {Output(c.node())}, &out_tensors)); ASSERT_EQ(memcpy_d2h_called, true); ASSERT_EQ(out_tensors.size(), 1); EXPECT_EQ(out_tensors[0].scalar<float>()(), 16.f); } }
#include "tensorflow/core/common_runtime/collective_rma_local.h" #include "tensorflow/core/common_runtime/copy_tensor.h" #include "tensorflow/core/common_runtime/dma_helper.h" namespace tensorflow { void CollectiveRemoteAccessLocal::StartAbort(const Status& s) { buf_rendezvous_.StartAbort(s); } void CollectiveRemoteAccessLocal::RecvFromPeer( const string& peer_device, const string& peer_task, bool peer_is_local, const string& key, Device* to_device, DeviceContext* to_device_ctx, const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor, const DeviceLocality& client_locality, int dev_to_dev_stream_index, CancellationManager* cancellation_manager, const StatusCallback& done) { VLOG(1) << "RecvFromPeer " << this << " from " << peer_device << " key " << key; if (!peer_is_local) { done( errors::Internal("CollectiveRemoteAccessLocal::RecvFromPeer " "called with peer_is_local=false")); return; } Device* from_device; Status status = dev_mgr_->LookupDevice(peer_device, &from_device); if (!status.ok()) { done(status); return; } auto consumer_callback = [to_tensor, to_device_ctx, to_device, to_alloc_attr, dev_to_dev_stream_index, done](const Status& status, BufRendezvous::Hook* hook) { Status s = status; if (s.ok()) { if (hook == nullptr) { s = errors::Internal("Invalid null hook in ConsumeBuf callback"); } } else { if (hook != nullptr) { LOG(ERROR) << "Got hook " << hook << " with status " << s << " from ConsumeBuf"; } } if (s.ok()) { int64_t recv_bytes = to_tensor->TotalBytes(); CHECK_EQ(recv_bytes, hook->prod_value->TotalBytes()); MemCpyAsync(hook->prod_ctx, to_device_ctx, hook->prod_dev, to_device, hook->prod_attr, to_alloc_attr, hook->prod_value, to_tensor, dev_to_dev_stream_index, [hook, done](const Status& memcpy_status) { done(memcpy_status); BufRendezvous::DoneWithHook(hook); }); } else { done(s); if (hook != nullptr) { BufRendezvous::DoneWithHook(hook); } } }; buf_rendezvous_.ConsumeBuf(key, from_device->name(), from_device->attributes().incarnation(), consumer_callback, cancellation_manager); } void CollectiveRemoteAccessLocal::PostToPeer( const string& peer_device, const string& peer_task, const string& key, Device* from_device, DeviceContext* from_device_ctx, const AllocatorAttributes& from_alloc_attr, const Tensor* from_tensor, const DeviceLocality& client_locality, CancellationManager* cancellation_manager, const StatusCallback& done) { VLOG(1) << "PostToPeer " << this << " key " << key << " step_id_=" << step_id_; buf_rendezvous_.ProvideBuf(key, from_device, from_device_ctx, from_tensor, from_alloc_attr, done, cancellation_manager); } void CollectiveRemoteAccessLocal::CheckPeerHealth(const string& peer_task, int64_t timeout_in_ms, const StatusCallback& done) { done(errors::Internal( "CheckPeerHealth is not supposed to be called for local collectives")); } void CollectiveRemoteAccessLocal::MemCpyAsync( DeviceContext* src_dev_ctx, DeviceContext* dst_dev_ctx, Device* src_dev, Device* dst_dev, const AllocatorAttributes& src_attr, const AllocatorAttributes& dst_attr, const Tensor* src, Tensor* dst, int dev_to_dev_stream_index, const StatusCallback& done) { const DeviceType src_device_type( src_attr.on_host() ? DEVICE_CPU : src_dev->attributes().device_type()); const DeviceType dst_device_type( dst_attr.on_host() ? 
DEVICE_CPU : dst_dev->attributes().device_type()); const bool non_cpu_src = src_device_type != DeviceType(DEVICE_CPU); const bool non_cpu_dst = dst_device_type != DeviceType(DEVICE_CPU); if (src_dev_ctx == nullptr && src_device_type == DEVICE_GPU) { const DeviceBase::AcceleratorDeviceInfo* dev_info = src_dev->tensorflow_accelerator_device_info(); CHECK(dev_info); src_dev_ctx = dev_info->default_context; } if (dst_dev_ctx == nullptr && dst_device_type == DEVICE_GPU) { const DeviceBase::AcceleratorDeviceInfo* dev_info = dst_dev->tensorflow_accelerator_device_info(); CHECK(dev_info); dst_dev_ctx = dev_info->default_context; } if (non_cpu_src) CHECK(src_dev_ctx); if (non_cpu_dst) CHECK(dst_dev_ctx); if (non_cpu_src || non_cpu_dst) { CopyTensor::ViaDMA("", src_dev_ctx, dst_dev_ctx, src_dev, dst_dev, src_attr, dst_attr, src, dst, dev_to_dev_stream_index, done); } else { int64_t bytes = src->TotalBytes(); DCHECK_EQ(dst->TotalBytes(), bytes); memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes); done(absl::OkStatus()); } } }
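When both endpoints resolve to CPU, MemCpyAsync above reduces to a plain memcpy and invokes done inline, with no DeviceContext required. A hedged usage sketch (cpu_dev stands for any CPU Device* looked up from a DeviceMgr; it is not defined in this file):

Tensor src(DT_FLOAT, TensorShape({4}));
Tensor dst(DT_FLOAT, TensorShape({4}));
src.flat<float>().setConstant(1.0f);
// Host-to-host: takes the memcpy fast path at the bottom of MemCpyAsync,
// so both DeviceContext arguments may be null.
CollectiveRemoteAccessLocal::MemCpyAsync(
    /*src_dev_ctx=*/nullptr, /*dst_dev_ctx=*/nullptr, cpu_dev, cpu_dev,
    AllocatorAttributes(), AllocatorAttributes(), &src, &dst,
    /*dev_to_dev_stream_index=*/0,
    [](const Status& s) { TF_CHECK_OK(s); });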
#include "tensorflow/core/common_runtime/collective_rma_local.h" #include "tensorflow/core/common_runtime/buf_rendezvous.h" #include "tensorflow/core/common_runtime/collective_param_resolver_local.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/device_resolver_local.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/unbounded_work_queue.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { #define NUM_DEVS 3 static const int kStepId = 123; class CollectiveRemoteAccessLocalTest : public ::testing::Test { protected: const string kTaskName = "/job:localhost/replica:0/task:0"; CollectiveRemoteAccessLocalTest() { work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test"); ConfigProto cp; SessionOptions options; auto* device_count = options.config.mutable_device_count(); device_count->insert({"CPU", NUM_DEVS}); std::vector<std::unique_ptr<Device>> devices; TF_CHECK_OK(DeviceFactory::AddDevices(options, kTaskName, &devices)); device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices)); drl_ = std::make_unique<DeviceResolverLocal>(device_mgr_.get()); prl_ = std::make_unique<CollectiveParamResolverLocal>( cp, device_mgr_.get(), drl_.get(), nullptr, kTaskName); rma_ = std::make_unique<CollectiveRemoteAccessLocal>(device_mgr_.get(), drl_.get(), kStepId); cm_ = std::make_unique<CancellationManager>(); } ~CollectiveRemoteAccessLocalTest() override = default; std::shared_ptr<UnboundedWorkQueue> work_queue_; std::unique_ptr<DeviceMgr> device_mgr_; std::unique_ptr<DeviceResolverLocal> drl_; std::unique_ptr<CollectiveParamResolverLocal> prl_; std::unique_ptr<CollectiveRemoteAccessLocal> rma_; std::unique_ptr<CancellationManager> cm_; }; TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU0) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName, true , "key_0", cpu0 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); Tensor source_tensor(DT_FLOAT, TensorShape({8})); for (int i = 0; i < 8; ++i) { source_tensor.flat<float>()(i) = i / 2; } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); recv_note.WaitForNotification(); send_note.WaitForNotification(); TF_EXPECT_OK(recv_status); TF_EXPECT_OK(send_status); for (int i = 0; i < 8; ++i) { EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2); } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); } TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU1_2) { Device* cpu2 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; 
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:2", &cpu2)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; rma_->RecvFromPeer(kTaskName + "/device:CPU:1", kTaskName, true , "key_0", cpu2 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); Tensor source_tensor(DT_FLOAT, TensorShape({8})); for (int i = 0; i < 8; ++i) { source_tensor.flat<float>()(i) = i / 2; } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); Device* cpu1 = nullptr; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:1", &cpu1)); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:2", kTaskName, "key_0", cpu1 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); recv_note.WaitForNotification(); send_note.WaitForNotification(); TF_EXPECT_OK(recv_status); TF_EXPECT_OK(send_status); for (int i = 0; i < 8; ++i) { EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2); } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); } TEST_F(CollectiveRemoteAccessLocalTest, CheckHealth) { Status status; Notification done; rma_->CheckPeerHealth(kTaskName, 0, [&status, &done](const Status& s) { status = s; done.Notify(); }); done.WaitForNotification(); EXPECT_TRUE(errors::IsInternal(status)); } TEST_F(CollectiveRemoteAccessLocalTest, RecvThenCancel) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName, true , "key_0", cpu0 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); cm_->StartCancel(); recv_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(recv_status)); } TEST_F(CollectiveRemoteAccessLocalTest, CancelThenRecv) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; cm_->StartCancel(); rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName, true , "key_0", cpu0 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); recv_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(recv_status)); } TEST_F(CollectiveRemoteAccessLocalTest, PostThenCancel) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor source_tensor(DT_FLOAT, TensorShape({8})); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); cm_->StartCancel(); send_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(send_status)); } TEST_F(CollectiveRemoteAccessLocalTest, CancelThenPost) 
{ Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor source_tensor(DT_FLOAT, TensorShape({8})); Notification send_note; Status send_status; cm_->StartCancel(); rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); send_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(send_status)); } } }
void CollectiveRemoteAccessLocal::PostToPeer( const string& peer_device, const string& peer_task, const string& key, Device* from_device, DeviceContext* from_device_ctx, const AllocatorAttributes& from_alloc_attr, const Tensor* from_tensor, const DeviceLocality& client_locality, CancellationManager* cancellation_manager, const StatusCallback& done) { VLOG(1) << "PostToPeer " << this << " key " << key << " step_id_=" << step_id_; buf_rendezvous_.ProvideBuf(key, from_device, from_device_ctx, from_tensor, from_alloc_attr, done, cancellation_manager); }
TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU0) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName, true , "key_0", cpu0 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); Tensor source_tensor(DT_FLOAT, TensorShape({8})); for (int i = 0; i < 8; ++i) { source_tensor.flat<float>()(i) = i / 2; } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); recv_note.WaitForNotification(); send_note.WaitForNotification(); TF_EXPECT_OK(recv_status); TF_EXPECT_OK(send_status); for (int i = 0; i < 8; ++i) { EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2); } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); } TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU1_2) { Device* cpu2 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:2", &cpu2)); Tensor sink_tensor(DT_FLOAT, TensorShape({8})); Notification recv_note; Status recv_status; rma_->RecvFromPeer(kTaskName + "/device:CPU:1", kTaskName, true , "key_0", cpu2 , nullptr , attr , &sink_tensor, dev_locality, 0 , cm_.get(), [&recv_note, &recv_status](const Status& s) { recv_status = s; recv_note.Notify(); }); Tensor source_tensor(DT_FLOAT, TensorShape({8})); for (int i = 0; i < 8; ++i) { source_tensor.flat<float>()(i) = i / 2; } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); Device* cpu1 = nullptr; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:1", &cpu1)); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:2", kTaskName, "key_0", cpu1 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); recv_note.WaitForNotification(); send_note.WaitForNotification(); TF_EXPECT_OK(recv_status); TF_EXPECT_OK(send_status); for (int i = 0; i < 8; ++i) { EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2); } EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); } TEST_F(CollectiveRemoteAccessLocalTest, PostThenCancel) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor source_tensor(DT_FLOAT, TensorShape({8})); Notification send_note; Status send_status; rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); cm_->StartCancel(); send_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(send_status)); } TEST_F(CollectiveRemoteAccessLocalTest, CancelThenPost) { Device* cpu0 = nullptr; AllocatorAttributes attr; DeviceLocality dev_locality; TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0)); Tensor source_tensor(DT_FLOAT, 
TensorShape({8})); Notification send_note; Status send_status; cm_->StartCancel(); rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0", cpu0 , nullptr , attr , &source_tensor, dev_locality, cm_.get(), [&send_note, &send_status](const Status& s) { send_status = s; send_note.Notify(); }); send_note.WaitForNotification(); EXPECT_TRUE(cm_->IsCancelled()); EXPECT_TRUE(errors::IsCancelled(send_status)); }
#include "tensorflow/core/framework/run_handler_util.h" #include <cmath> #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/str_util.h" namespace tensorflow { double ParamFromEnvWithDefault(const char* var_name, double default_value) { const char* val = std::getenv(var_name); double num; return (val && strings::safe_strtod(val, &num)) ? num : default_value; } std::vector<double> ParamFromEnvWithDefault(const char* var_name, std::vector<double> default_value) { const char* val = std::getenv(var_name); if (!val) { return default_value; } std::vector<string> splits = str_util::Split(val, ","); std::vector<double> result; result.reserve(splits.size()); for (auto& split : splits) { double num; if (strings::safe_strtod(split, &num)) { result.push_back(num); } else { LOG(ERROR) << "Wrong format for " << var_name << ". Use default value."; return default_value; } } return result; } std::vector<int> ParamFromEnvWithDefault(const char* var_name, std::vector<int> default_value) { const char* val = std::getenv(var_name); if (!val) { return default_value; } std::vector<string> splits = str_util::Split(val, ","); std::vector<int> result; result.reserve(splits.size()); for (auto& split : splits) { int num; if (strings::safe_strto32(split, &num)) { result.push_back(num); } else { LOG(ERROR) << "Wrong format for " << var_name << ". Use default value."; return default_value; } } return result; } bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value) { const char* val = std::getenv(var_name); return (val) ? str_util::Lowercase(val) == "true" : default_value; } void ComputeInterOpSchedulingRanges(int num_active_requests, int num_threads, int min_threads_per_request, std::vector<std::uint_fast32_t>* start_vec, std::vector<std::uint_fast32_t>* end_vec) { float total_weight = 0.5f * num_active_requests * (num_active_requests + 1); float demand_factor = static_cast<float>(num_threads) / total_weight; float last_cumulative_weight = 0.0; min_threads_per_request = std::max(1, min_threads_per_request); for (int i = 0; i != num_active_requests; i++) { float cumulative_weight = static_cast<float>(i + 1) * (num_active_requests - static_cast<float>(i) * 0.5f); float weight = cumulative_weight - last_cumulative_weight; int demand = std::max( min_threads_per_request, static_cast<int>(std::ceil(weight * demand_factor - 0.00001f))); int start = last_cumulative_weight * demand_factor; int end = std::min(num_threads, start + demand); start = std::max(0, std::min(start, end - demand)); start_vec->at(i) = start; end_vec->at(i) = end; last_cumulative_weight = cumulative_weight; } } void ComputeInterOpStealingRanges(int num_threads, int min_threads_per_domain, std::vector<std::uint_fast32_t>* start_vec, std::vector<std::uint_fast32_t>* end_vec) { int steal_domain_size = std::min(min_threads_per_domain, num_threads); unsigned steal_start = 0, steal_end = steal_domain_size; for (int i = 0; i < num_threads; ++i) { if (i >= steal_end) { if (steal_end + steal_domain_size < num_threads) { steal_start = steal_end; steal_end += steal_domain_size; } else { steal_end = num_threads; steal_start = steal_end - steal_domain_size; } } start_vec->at(i) = steal_start; end_vec->at(i) = steal_end; } } std::vector<int> ChooseRequestsWithExponentialDistribution( int num_active_requests, int num_threads) { static const double kCapacityFractionForEvenDistribution = ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_EVEN_FRACTION", 0.5); static const double 
kPowerBase = ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_POWER_BASE", 2.0); static const int kMinEvenThreadsFromEnv = static_cast<int>( ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_MIN_EVEN_THREADS", 1)); static const int kMaxEvenThreadsFromEnv = static_cast<int>( ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_MAX_EVEN_THREADS", 3)); std::vector<int> request_idx_list; request_idx_list.resize(num_threads); int min_threads_per_request = num_threads * kCapacityFractionForEvenDistribution / num_active_requests; min_threads_per_request = std::max(kMinEvenThreadsFromEnv, min_threads_per_request); min_threads_per_request = std::min(kMaxEvenThreadsFromEnv, min_threads_per_request); int num_remaining_threads = std::max(0, num_threads - num_active_requests * min_threads_per_request); int request_idx = -1; int num_threads_next_request = 0; for (int tid = 0; tid < num_threads; ++tid) { if (num_threads_next_request <= 0) { request_idx = std::min(num_active_requests - 1, request_idx + 1); int num_extra_threads_next_request = std::ceil(num_remaining_threads * (kPowerBase - 1.0) / kPowerBase); num_remaining_threads -= num_extra_threads_next_request; num_threads_next_request = num_extra_threads_next_request + min_threads_per_request; } num_threads_next_request--; request_idx_list[tid] = request_idx; } return request_idx_list; } }
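To make the triangular weighting in ComputeInterOpSchedulingRanges concrete, here is a small worked example (my own numbers, not from the file): with 2 active requests and 6 threads, total_weight = 0.5 * 2 * 3 = 3 and demand_factor = 6 / 3 = 2; request 0 has weight 2, so demand ceil(2 * 2) = 4 and range [0, 4); request 1 has weight 1, so demand 2 and range [4, 6).

std::vector<std::uint_fast32_t> start(2), end(2);
ComputeInterOpSchedulingRanges(/*num_active_requests=*/2, /*num_threads=*/6,
                               /*min_threads_per_request=*/1, &start, &end);
// start == {0, 4}, end == {4, 6}: the older request (index 0) gets the
// larger, lower-numbered slice; ranges of adjacent requests may overlap
// in general, which is what permits work sharing between neighbors.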
#include "tensorflow/core/framework/run_handler_util.h" #include <vector> #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { void VerifySchedulingRanges(int num_active_requests, int num_threads, int min_threads_per_request, bool print_stats = false) { if (print_stats) { LOG(INFO) << "Test case# num_active_requests: " << num_active_requests << " num_threads: " << num_threads << " min_threads: " << min_threads_per_request; } std::vector<std::uint_fast32_t> start(num_active_requests); std::vector<std::uint_fast32_t> end(num_active_requests); ComputeInterOpSchedulingRanges(num_active_requests, num_threads, min_threads_per_request, &start, &end); string range_str = ""; for (int i = 0; i < num_active_requests; ++i) { if (i > 0) range_str += " "; range_str += strings::StrCat("[", start[i], ", ", end[i], ")"); ASSERT_GE(start[i], 0) << range_str; ASSERT_LE(end[i], num_threads) << range_str; if (i > 0) { ASSERT_GE(end[i - 1] - start[i - 1], end[i] - start[i]) << range_str; ASSERT_GE(end[i - 1], start[i]) << range_str; } ASSERT_GE((end[i] - start[i]), min_threads_per_request) << range_str; float entry_weight = num_active_requests - i; float total_weight = 0.5f * num_active_requests * (num_active_requests + 1); float thread_demand = (entry_weight * num_threads) / total_weight; if (thread_demand > min_threads_per_request) { ASSERT_NEAR(end[i] - start[i], thread_demand, 1.0) << "Ranges: " << range_str << " thread_demand: " << thread_demand << " i: " << i; } } ASSERT_EQ(end[num_active_requests - 1], num_threads); ASSERT_EQ(start[0], 0); if (print_stats) { LOG(INFO) << "Assigned ranges: " << range_str; } } TEST(RunHandlerUtilTest, TestComputeInterOpSchedulingRanges) { const int kMinThreadsPerRequestBound = 12; const int kMaxActiveRequests = 128; const int kMaxThreads = 128; for (int min_threads_per_request = 1; min_threads_per_request <= kMinThreadsPerRequestBound; ++min_threads_per_request) { for (int num_active_requests = 1; num_active_requests <= kMaxActiveRequests; ++num_active_requests) { for (int num_threads = min_threads_per_request; num_threads <= kMaxThreads; ++num_threads) { VerifySchedulingRanges(num_active_requests, num_threads, min_threads_per_request); } } } } TEST(RunHandlerUtilTest, TestComputeInterOpStealingRanges) { int num_inter_op_threads = 9; std::vector<std::uint_fast32_t> start_vec(num_inter_op_threads); std::vector<std::uint_fast32_t> end_vec(num_inter_op_threads); ComputeInterOpStealingRanges(num_inter_op_threads, 6, &start_vec, &end_vec); int stealing_ranges[2][2] = {{0, 6}, {3, 9}}; for (int i = 0; i < num_inter_op_threads; ++i) { int expected_start = stealing_ranges[i / 6][0]; int expected_end = stealing_ranges[i / 6][1]; string message = strings::StrCat("Stealing range of thread ", i, " should be [", expected_start, ", ", expected_end, "]"); ASSERT_EQ(start_vec[i], expected_start) << message; ASSERT_EQ(end_vec[i], expected_end) << message; } } TEST(RunHandlerUtilTest, TestExponentialRequestDistribution) { int num_active_requests = 3; int num_threads = 10; std::vector<int> actual_distribution = ChooseRequestsWithExponentialDistribution(num_active_requests, num_threads); std::vector<int> expected_distribution{0, 0, 0, 0, 0, 1, 1, 1, 2, 2}; ASSERT_EQ(actual_distribution, expected_distribution); } TEST(RunHandlerUtilTest, TestParamFromEnvWithDefault) { std::vector<double> result = ParamFromEnvWithDefault( 
"RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0}); EXPECT_EQ(result.size(), 3); EXPECT_EQ(result[0], 0); EXPECT_EQ(result[1], 0); EXPECT_EQ(result[2], 0); std::vector<int> result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<int>{0, 0, 0}); EXPECT_EQ(result2.size(), 3); EXPECT_EQ(result2[0], 0); EXPECT_EQ(result2[1], 0); EXPECT_EQ(result2[2], 0); bool result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false); EXPECT_EQ(result3, false); EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV", "1,2,3", true), 0); result = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0}); EXPECT_EQ(result.size(), 3); EXPECT_EQ(result[0], 1); EXPECT_EQ(result[1], 2); EXPECT_EQ(result[2], 3); result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<int>{0, 0, 0}); EXPECT_EQ(result.size(), 3); EXPECT_EQ(result2[0], 1); EXPECT_EQ(result2[1], 2); EXPECT_EQ(result2[2], 3); EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV_BOOL", "true", true), 0); result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false); EXPECT_EQ(result3, true); } } }
bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value) { const char* val = std::getenv(var_name); return (val) ? str_util::Lowercase(val) == "true" : default_value; }
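Note the comparison is against the lowercased value, so any capitalization of "true" enables the flag and every other value (or an unset variable) falls back to the default. A small usage sketch; the variable names are made up for illustration:

setenv("MY_TEST_FLAG", "TRUE", /*overwrite=*/1);                    // hypothetical knob
bool enabled = ParamFromEnvBoolWithDefault("MY_TEST_FLAG", false);  // true
bool fallback = ParamFromEnvBoolWithDefault("UNSET_FLAG", true);    // default: true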
TEST(RunHandlerUtilTest, TestParamFromEnvWithDefault) { std::vector<double> result = ParamFromEnvWithDefault( "RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0}); EXPECT_EQ(result.size(), 3); EXPECT_EQ(result[0], 0); EXPECT_EQ(result[1], 0); EXPECT_EQ(result[2], 0); std::vector<int> result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<int>{0, 0, 0}); EXPECT_EQ(result2.size(), 3); EXPECT_EQ(result2[0], 0); EXPECT_EQ(result2[1], 0); EXPECT_EQ(result2[2], 0); bool result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false); EXPECT_EQ(result3, false); EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV", "1,2,3", true), 0); result = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0}); EXPECT_EQ(result.size(), 3); EXPECT_EQ(result[0], 1); EXPECT_EQ(result[1], 2); EXPECT_EQ(result[2], 3); result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV", std::vector<int>{0, 0, 0}); EXPECT_EQ(result2.size(), 3); EXPECT_EQ(result2[0], 1); EXPECT_EQ(result2[1], 2); EXPECT_EQ(result2[2], 3); EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV_BOOL", "true", true), 0); result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false); EXPECT_EQ(result3, true); }
#include "arolla/qtype/standard_type_properties/common_qtype.h" #include <algorithm> #include <array> #include <cstdint> #include "absl/algorithm/container.h" #include "absl/types/span.h" #include "arolla/qtype/array_like/array_like_qtype.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/shape_qtype.h" #include "arolla/qtype/standard_type_properties/properties.h" #include "arolla/util/status_macros_backport.h" namespace arolla { namespace { const QType* CommonScalarQType(const QType* lhs_qtype, const QType* rhs_qtype) { if (lhs_qtype == rhs_qtype) { return lhs_qtype; } { static const std::array integral_qtypes = {GetQType<int64_t>(), GetQType<int32_t>()}; auto lhs_it = absl::c_find(integral_qtypes, lhs_qtype); auto rhs_it = absl::c_find(integral_qtypes, rhs_qtype); if (lhs_it != integral_qtypes.end() && rhs_it != integral_qtypes.end()) { return *std::min(lhs_it, rhs_it); } } { static const std::array floating_point_qtypes = { GetQType<double>(), GetQType<float>(), GetWeakFloatQType(), }; auto lhs_it = absl::c_find(floating_point_qtypes, lhs_qtype); auto rhs_it = absl::c_find(floating_point_qtypes, rhs_qtype); if (lhs_it != floating_point_qtypes.end() && rhs_it != floating_point_qtypes.end()) { return *std::min(lhs_it, rhs_it); } } return nullptr; } const ShapeQType* CommonShapeQType(const ShapeQType* lhs_qtype, const ShapeQType* rhs_qtype, bool enable_broadcasting) { if (lhs_qtype == rhs_qtype) { return rhs_qtype; } if (!enable_broadcasting && (IsArrayLikeShapeQType(lhs_qtype) || IsArrayLikeShapeQType(rhs_qtype))) { return nullptr; } if (lhs_qtype == GetQType<ScalarShape>()) { return rhs_qtype; } if (rhs_qtype == GetQType<ScalarShape>()) { return lhs_qtype; } if (lhs_qtype == GetQType<OptionalScalarShape>()) { return rhs_qtype; } if (rhs_qtype == GetQType<OptionalScalarShape>()) { return lhs_qtype; } return nullptr; } } const QType* CommonQType(const QType* lhs_qtype, const QType* rhs_qtype, bool enable_broadcasting) { if (lhs_qtype == nullptr || rhs_qtype == nullptr) { return nullptr; } if (lhs_qtype == rhs_qtype) { return lhs_qtype; } ASSIGN_OR_RETURN(auto lhs_scalar_qtype, GetScalarQType(lhs_qtype), nullptr); ASSIGN_OR_RETURN(auto rhs_scalar_qtype, GetScalarQType(rhs_qtype), nullptr); const auto* scalar_qtype = CommonScalarQType(lhs_scalar_qtype, rhs_scalar_qtype); if (!scalar_qtype) { return nullptr; } ASSIGN_OR_RETURN(auto lhs_shape_qtype, GetShapeQType(lhs_qtype), nullptr); ASSIGN_OR_RETURN(auto rhs_shape_qtype, GetShapeQType(rhs_qtype), nullptr); const auto* shape_qtype = CommonShapeQType(lhs_shape_qtype, rhs_shape_qtype, enable_broadcasting); if (!shape_qtype) { return nullptr; } return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr); } bool CanCastImplicitly(QTypePtr from_qtype, QTypePtr to_qtype, bool enable_broadcasting) { return to_qtype != nullptr && CommonQType(from_qtype, to_qtype, enable_broadcasting) == to_qtype; } const QType* CommonQType(absl::Span<const QType* const> qtypes, bool enable_broadcasting) { if (qtypes.empty()) { return nullptr; } const QType* result = qtypes[0]; for (const QType* qtype : qtypes.subspan(1)) { result = CommonQType(result, qtype, enable_broadcasting); } return result; } const QType* BroadcastQType(absl::Span<QType const* const> target_qtypes, const QType* qtype) { if (absl::c_any_of(target_qtypes, [](auto* qtype) { return qtype == nullptr; }) || qtype == nullptr) { return nullptr; } const ShapeQType* shape_qtype = 
GetShapeQType(qtype).value_or(nullptr); for (const auto* target_qtype : target_qtypes) { shape_qtype = CommonShapeQType( shape_qtype, GetShapeQType(target_qtype).value_or(nullptr), true); } if (shape_qtype == nullptr) { return nullptr; } ASSIGN_OR_RETURN(auto scalar_qtype, GetScalarQType(qtype), nullptr); return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr); } }
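// Usage sketch for the promotion rules above; each expectation mirrors the
// unit tests that follow. A minimal sketch, assuming the weak-float and
// dense-array qtype headers used by the test file below are also available.
#include <cassert>

namespace arolla {

inline void CommonQTypeUsageSketch() {
  // Integral qtypes widen toward int64_t.
  assert(CommonQType(GetQType<int32_t>(), GetQType<int64_t>(),
                     /*enable_broadcasting=*/false) == GetQType<int64_t>());
  // Weak float yields to any explicit floating-point qtype.
  assert(CommonQType(GetWeakFloatQType(), GetQType<float>(),
                     /*enable_broadcasting=*/false) == GetQType<float>());
  // Broadcasting lifts a scalar into the array shape.
  assert(CommonQType(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
                     /*enable_broadcasting=*/true) ==
         GetDenseArrayQType<int32_t>());
}

}  // namespace arolla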
#include "arolla/qtype/standard_type_properties/common_qtype.h" #include <algorithm> #include <cstdint> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "arolla/array/qtype/types.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/tuple_qtype.h" #include "arolla/qtype/weak_qtype.h" #include "arolla/util/bytes.h" #include "arolla/util/meta.h" #include "arolla/util/unit.h" namespace arolla { namespace { using ::testing::IsFalse; using ::testing::IsNull; using ::testing::IsTrue; const QType* ReferenceCommonQType(const QType* arg0, const QType* arg1, bool enable_broadcasting_) { if (arg0 == arg1) { return arg0; } const QType* result = nullptr; const auto gen_result = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) { if (a0 == arg0 && a1 == arg1) { result = r; } }; const auto gen_results = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) { ASSERT_OK_AND_ASSIGN(auto a0_optional, ToOptionalQType(a0)); ASSERT_OK_AND_ASSIGN(auto a0_dense_array, GetDenseArrayQTypeByValueQType(a0)); ASSERT_OK_AND_ASSIGN(auto a0_array, GetArrayQTypeByValueQType(a0)); ASSERT_OK_AND_ASSIGN(auto a1_optional, ToOptionalQType(a1)); ASSERT_OK_AND_ASSIGN(auto a1_dense_array, GetDenseArrayQTypeByValueQType(a1)); ASSERT_OK_AND_ASSIGN(auto a1_array, GetArrayQTypeByValueQType(a1)); ASSERT_OK_AND_ASSIGN(auto r_optional, ToOptionalQType(r)); ASSERT_OK_AND_ASSIGN(auto r_dense_array, GetDenseArrayQTypeByValueQType(r)); ASSERT_OK_AND_ASSIGN(auto r_array, GetArrayQTypeByValueQType(r)); gen_result(a0, a1, r); gen_result(a0, a1_optional, r_optional); gen_result(a0_optional, a1_optional, r_optional); gen_result(a0_optional, a1, r_optional); gen_result(a0_dense_array, a1_dense_array, r_dense_array); gen_result(a0_array, a1_array, r_array); if (enable_broadcasting_) { gen_result(a0, a1_dense_array, r_dense_array); gen_result(a0_optional, a1_dense_array, r_dense_array); gen_result(a0, a1_array, r_array); gen_result(a0_optional, a1_array, r_array); gen_result(a0_dense_array, a1_optional, r_dense_array); gen_result(a0_dense_array, a1, r_dense_array); gen_result(a0_array, a1_optional, r_array); gen_result(a0_array, a1, r_array); } }; meta::foreach_type<ScalarTypes>([&](auto meta_type) { auto x = GetQType<typename decltype(meta_type)::type>(); gen_results(x, x, x); }); static const auto integral_qtypes = { GetQType<int32_t>(), GetQType<int64_t>(), }; for (auto it = integral_qtypes.begin(); result == nullptr && it != integral_qtypes.end(); ++it) { for (auto jt = integral_qtypes.begin(); result == nullptr && jt != integral_qtypes.end(); ++jt) { gen_results(*it, *jt, *std::max(it, jt)); } } static const auto floating_qtypes = { GetWeakFloatQType(), GetQType<float>(), GetQType<double>(), }; for (auto it = floating_qtypes.begin(); result == nullptr && it != floating_qtypes.end(); ++it) { for (auto jt = floating_qtypes.begin(); result == nullptr && jt != floating_qtypes.end(); ++jt) { gen_results(*it, *jt, *std::max(it, jt)); } } return result; } class CommonQTypeMultipleParametersTests : public ::testing::TestWithParam<bool> { protected: CommonQTypeMultipleParametersTests() { meta::foreach_type<ScalarTypes>([&](auto meta_type) { using T = typename decltype(meta_type)::type; known_qtypes_.push_back(GetQType<T>()); known_qtypes_.push_back(GetOptionalQType<T>()); known_qtypes_.push_back(GetDenseArrayQType<T>()); known_qtypes_.push_back(GetArrayQType<T>()); }); 
known_qtypes_.push_back(nullptr); known_qtypes_.push_back(GetDenseArrayWeakFloatQType()); known_qtypes_.push_back(GetArrayWeakFloatQType()); known_qtypes_.push_back(MakeTupleQType({})); enable_broadcasting_ = GetParam(); } std::vector<const QType*> known_qtypes_; bool enable_broadcasting_; }; TEST_P(CommonQTypeMultipleParametersTests, VsReferenceImplementation) { for (auto lhs : known_qtypes_) { for (auto rhs : known_qtypes_) { EXPECT_EQ(CommonQType(lhs, rhs, enable_broadcasting_), ReferenceCommonQType(lhs, rhs, enable_broadcasting_)) << "lhs=" << (lhs ? lhs->name() : "nullptr") << ", rhs=" << (rhs ? rhs->name() : "nullptr"); } } } TEST_P(CommonQTypeMultipleParametersTests, SemiLatticeProperties) { for (auto arg_0 : known_qtypes_) { EXPECT_EQ( CommonQType(arg_0, arg_0, enable_broadcasting_), arg_0); for (auto arg_1 : known_qtypes_) { EXPECT_EQ( CommonQType(arg_0, arg_1, enable_broadcasting_), CommonQType(arg_1, arg_0, enable_broadcasting_)); for (auto arg_2 : known_qtypes_) { EXPECT_EQ( CommonQType(CommonQType(arg_0, arg_1, enable_broadcasting_), arg_2, enable_broadcasting_), CommonQType(arg_0, CommonQType(arg_1, arg_2, enable_broadcasting_), enable_broadcasting_)); } } } } INSTANTIATE_TEST_SUITE_P(CommonQTypeTests, CommonQTypeMultipleParametersTests, ::testing::Values(false, true)); class CommonQTypeTest : public ::testing::Test { protected: static void SetUpTestCase() { meta::foreach_type<ScalarTypes>([&](auto meta_type) { GetQType<typename decltype(meta_type)::type>(); GetOptionalQType<typename decltype(meta_type)::type>(); GetDenseArrayQType<typename decltype(meta_type)::type>(); }); } }; TEST_F(CommonQTypeTest, OnSpans) { EXPECT_THAT(CommonQType({}, true), IsNull()); EXPECT_EQ(CommonQType({GetQType<int64_t>()}, true), GetQType<int64_t>()); EXPECT_THAT( CommonQType({nullptr, GetQType<int64_t>()}, true), IsNull()); EXPECT_THAT( CommonQType({GetQType<int64_t>(), nullptr}, true), IsNull()); EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>()}, true), GetOptionalQType<int64_t>()); EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>()}, true), GetDenseArrayQType<int64_t>()); EXPECT_EQ( CommonQType(GetDenseArrayQType<int32_t>(), GetOptionalQType<int64_t>(), true), GetDenseArrayQType<int64_t>()); EXPECT_THAT(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>()}, false), IsNull()); } TEST_F(CommonQTypeTest, WeakQType) { EXPECT_EQ(CommonQType(GetQType<double>(), GetWeakFloatQType(), true), GetQType<double>()); EXPECT_EQ(CommonQType(GetQType<float>(), GetWeakFloatQType(), true), GetQType<float>()); EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetWeakFloatQType(), true), GetWeakFloatQType()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetWeakFloatQType(), true), GetOptionalWeakFloatQType()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<double>(), true), GetOptionalQType<double>()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<float>(), true), GetOptionalQType<float>()); EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<double>(), true), GetOptionalQType<double>()); EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<float>(), true), GetOptionalQType<float>()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<double>(), true), GetOptionalQType<double>()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<float>(), true), GetOptionalQType<float>()); EXPECT_EQ(CommonQType(GetWeakFloatQType(), 
GetArrayQType<double>(), true), GetArrayQType<double>()); EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetArrayQType<float>(), true), GetArrayQType<float>()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<double>(), true), GetArrayQType<double>()); EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<float>(), true), GetArrayQType<float>()); } class CanCastImplicitlyTest : public ::testing::Test { protected: static void SetUpTestCase() { meta::foreach_type<ScalarTypes>([&](auto meta_type) { GetQType<typename decltype(meta_type)::type>(); GetOptionalQType<typename decltype(meta_type)::type>(); GetDenseArrayQType<typename decltype(meta_type)::type>(); }); } }; TEST_F(CanCastImplicitlyTest, OnScalars) { EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<int64_t>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetQType<double>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<float>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<double>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetQType<int64_t>(), GetQType<int32_t>(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<float>(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<uint64_t>(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), nullptr, false), IsFalse()); EXPECT_THAT(CanCastImplicitly(nullptr, GetQType<int32_t>(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(nullptr, nullptr, false), IsFalse()); } TEST_F(CanCastImplicitlyTest, WithBroadcasting) { EXPECT_THAT( CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>(), false), IsFalse()); EXPECT_THAT( CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(), true), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>(), true), IsTrue()); } TEST_F(CanCastImplicitlyTest, WeakQType) { EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetWeakFloatQType(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetQType<double>(), GetWeakFloatQType(), false), IsFalse()); EXPECT_THAT( CanCastImplicitly(GetQType<int32_t>(), GetWeakFloatQType(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<float>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<double>(), false), IsTrue()); EXPECT_THAT( CanCastImplicitly(GetWeakFloatQType(), GetQType<int32_t>(), false), IsFalse()); EXPECT_THAT( CanCastImplicitly(GetWeakFloatQType(), GetOptionalQType<float>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetOptionalWeakFloatQType(), GetOptionalQType<double>(), false), IsTrue()); EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(), false), IsFalse()); EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(), true), IsTrue()); EXPECT_THAT( CanCastImplicitly(GetOptionalWeakFloatQType(), GetArrayWeakFloatQType(), true), IsTrue()); EXPECT_THAT( CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<float>(), true), IsTrue()); EXPECT_THAT( CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<double>(), true), IsTrue()); } class BroadcastQTypeTests : public ::testing::Test { protected: static void SetUpTestCase() { meta::foreach_type<ScalarTypes>([&](auto meta_type) { using T = typename 
decltype(meta_type)::type; GetQType<T>(); GetOptionalQType<T>(); GetDenseArrayQType<T>(); GetArrayQType<T>(); }); GetDenseArrayWeakFloatQType(); GetArrayWeakFloatQType(); } }; TEST_F(BroadcastQTypeTests, Empty) { ASSERT_THAT(BroadcastQType({}, nullptr), IsNull()); } TEST_F(BroadcastQTypeTests, SingleScalarType) { ASSERT_EQ(BroadcastQType({}, GetQType<int32_t>()), GetQType<int32_t>()); } TEST_F(BroadcastQTypeTests, NullHandling) { ASSERT_THAT(BroadcastQType({nullptr}, GetQType<int32_t>()), IsNull()); ASSERT_THAT(BroadcastQType({GetQType<int32_t>()}, nullptr), IsNull()); ASSERT_THAT( BroadcastQType({GetQType<int32_t>(), nullptr}, GetQType<int32_t>()), IsNull()); } TEST_F(BroadcastQTypeTests, ScalarAndOptional) { ASSERT_EQ(BroadcastQType({GetOptionalQType<int32_t>()}, GetQType<int64_t>()), GetOptionalQType<int64_t>()); ASSERT_EQ(BroadcastQType({GetQType<int64_t>()}, GetOptionalQType<int32_t>()), GetOptionalQType<int32_t>()); } TEST_F(BroadcastQTypeTests, ArrayAndDenseArray) { EXPECT_THAT( BroadcastQType({GetArrayQType<float>()}, GetDenseArrayQType<float>()), IsNull()); EXPECT_THAT( BroadcastQType({GetArrayQType<float>(), GetDenseArrayQType<float>()}, GetQType<float>()), IsNull()); } TEST_F(BroadcastQTypeTests, Basic) { ASSERT_EQ( BroadcastQType({GetOptionalQType<float>(), GetDenseArrayQType<Bytes>()}, GetQType<int32_t>()), GetDenseArrayQType<int32_t>()); } TEST_F(BroadcastQTypeTests, WeakFloat) { ASSERT_EQ(BroadcastQType({GetDenseArrayQType<Unit>()}, GetWeakFloatQType()), GetDenseArrayWeakFloatQType()); ASSERT_EQ( BroadcastQType({GetDenseArrayQType<Unit>()}, GetOptionalWeakFloatQType()), GetDenseArrayWeakFloatQType()); ASSERT_EQ(BroadcastQType({GetArrayQType<Unit>()}, GetWeakFloatQType()), GetArrayWeakFloatQType()); ASSERT_EQ( BroadcastQType({GetArrayQType<Unit>()}, GetOptionalWeakFloatQType()), GetArrayWeakFloatQType()); } } }
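// The SemiLatticeProperties test above checks that, for any fixed
// enable_broadcasting flag `bc`, CommonQType behaves as a semilattice join:
//
//   CommonQType(a, a, bc) == a                                 (idempotent)
//   CommonQType(a, b, bc) == CommonQType(b, a, bc)             (commutative)
//   CommonQType(CommonQType(a, b, bc), c, bc)
//       == CommonQType(a, CommonQType(b, c, bc), bc)           (associative)
//
// with nullptr acting as an absorbing "no common type" element.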
bool CanCastImplicitly(QTypePtr from_qtype, QTypePtr to_qtype,
                       bool enable_broadcasting) {
  return to_qtype != nullptr &&
         CommonQType(from_qtype, to_qtype, enable_broadcasting) == to_qtype;
}
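// Example: implicit castability is direction-sensitive because it requires
// the common qtype to be exactly the target qtype. A minimal sketch; the
// expectations mirror the unit test below and assume the same qtype
// registrations.
#include <cassert>
#include <cstdint>

namespace arolla {

inline void CanCastImplicitlySketch() {
  // Widening int32 -> int64 is allowed; narrowing is not.
  assert(CanCastImplicitly(GetQType<int32_t>(), GetQType<int64_t>(),
                           /*enable_broadcasting=*/false));
  assert(!CanCastImplicitly(GetQType<int64_t>(), GetQType<int32_t>(),
                            /*enable_broadcasting=*/false));
  // Casting a scalar into an array shape needs broadcasting enabled.
  assert(!CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
                            /*enable_broadcasting=*/false));
  assert(CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
                           /*enable_broadcasting=*/true));
}

}  // namespace arolla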
TEST_F(CanCastImplicitlyTest, OnScalars) {
  EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<int64_t>(), false),
              IsTrue());
  EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetQType<double>(), false),
              IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetQType<float>(), GetOptionalQType<float>(), false),
      IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetQType<float>(), GetOptionalQType<double>(), false),
      IsTrue());
  EXPECT_THAT(CanCastImplicitly(GetQType<int64_t>(), GetQType<int32_t>(), false),
              IsFalse());
  EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<float>(), false),
              IsFalse());
  EXPECT_THAT(
      CanCastImplicitly(GetQType<int32_t>(), GetQType<uint64_t>(), false),
      IsFalse());
  EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), nullptr, false),
              IsFalse());
  EXPECT_THAT(CanCastImplicitly(nullptr, GetQType<int32_t>(), false),
              IsFalse());
  EXPECT_THAT(CanCastImplicitly(nullptr, nullptr, false), IsFalse());
}

TEST_F(CanCastImplicitlyTest, WithBroadcasting) {
  EXPECT_THAT(
      CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
                        false),
      IsFalse());
  EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
                                GetDenseArrayQType<int32_t>(), false),
              IsFalse());
  EXPECT_THAT(
      CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
                        true),
      IsTrue());
  EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
                                GetDenseArrayQType<int32_t>(), true),
              IsTrue());
}

TEST_F(CanCastImplicitlyTest, WeakQType) {
  EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetWeakFloatQType(), false),
              IsFalse());
  EXPECT_THAT(CanCastImplicitly(GetQType<double>(), GetWeakFloatQType(), false),
              IsFalse());
  EXPECT_THAT(
      CanCastImplicitly(GetQType<int32_t>(), GetWeakFloatQType(), false),
      IsFalse());
  EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<float>(), false),
              IsTrue());
  EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<double>(), false),
              IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetQType<int32_t>(), false),
      IsFalse());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetOptionalQType<float>(), false),
      IsTrue());
  EXPECT_THAT(CanCastImplicitly(GetOptionalWeakFloatQType(),
                                GetOptionalQType<double>(), false),
              IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(), false),
      IsFalse());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(), true),
      IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetOptionalWeakFloatQType(), GetArrayWeakFloatQType(),
                        true),
      IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<float>(), true),
      IsTrue());
  EXPECT_THAT(
      CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<double>(),
                        true),
      IsTrue());
}
#include "tensorstore/kvstore/generation.h" #include <stddef.h> #include <stdint.h> #include <cstring> #include <ostream> #include <string_view> #include <utility> #include "absl/time/time.h" #include "tensorstore/serialization/absl_time.h" #include "tensorstore/serialization/serialization.h" #include "tensorstore/util/quote_string.h" namespace tensorstore { namespace { std::string_view CanonicalGeneration(std::string_view generation) { size_t new_size = generation.size(); while (new_size && generation[new_size - 1] == 0) { --new_size; } return generation.substr(0, new_size); } } std::ostream& operator<<(std::ostream& os, const StorageGeneration& g) { return os << QuoteString(g.value); } std::ostream& operator<<(std::ostream& os, const TimestampedStorageGeneration& x) { return os << "{generation=" << x.generation << ", time=" << x.time << "}"; } bool StorageGeneration::Equivalent(std::string_view a, std::string_view b) { return CanonicalGeneration(a) == CanonicalGeneration(b); } StorageGeneration StorageGeneration::Clean(StorageGeneration generation) { size_t new_size = generation.value.size(); while (new_size) { if (generation.value[new_size - 1] & kBaseGeneration) { generation.value[new_size - 1] &= ~(kDirty | kNewlyDirty); break; } --new_size; } generation.value.resize(new_size); return generation; } void StorageGeneration::MarkDirty() { if (value.empty()) { value = (kDirty | kNewlyDirty); } else { value.back() |= (kDirty | kNewlyDirty); } } StorageGeneration StorageGeneration::Dirty(StorageGeneration generation) { if (generation.value.empty()) { return StorageGeneration{std::string(1, kDirty)}; } generation.value.back() |= kDirty; return generation; } StorageGeneration StorageGeneration::FromUint64(uint64_t n) { StorageGeneration generation; generation.value.resize(9); std::memcpy(generation.value.data(), &n, 8); generation.value[8] = kBaseGeneration; return generation; } StorageGeneration StorageGeneration::FromString(std::string_view s) { StorageGeneration generation; generation.value.reserve(s.size() + 1); generation.value += s; generation.value += kBaseGeneration; return generation; } StorageGeneration StorageGeneration::Condition( const StorageGeneration& generation, StorageGeneration condition) { if (IsDirty(generation)) { return Dirty(Clean(std::move(condition))); } return Clean(std::move(condition)); } bool StorageGeneration::IsDirty(const StorageGeneration& generation) { auto canonical = CanonicalGeneration(generation.value); return !canonical.empty() && (canonical.back() & kDirty); } bool StorageGeneration::IsInnerLayerDirty(const StorageGeneration& generation) { return !generation.value.empty() && (generation.value.back() & kDirty); } StorageGeneration StorageGeneration::AddLayer(StorageGeneration generation) { generation.value.resize(generation.value.size() + 1); return generation; } bool StorageGeneration::IsConditional(const StorageGeneration& generation) { size_t new_size = generation.value.size(); while (new_size && !(generation.value[new_size - 1] & kBaseGeneration)) { --new_size; } return (new_size != 0); } bool StorageGeneration::IsConditionalOn(const StorageGeneration& generation, const StorageGeneration& condition) { size_t size = generation.value.size(); return size != 0 && condition.value.size() == size && std::memcmp(generation.value.data(), condition.value.data(), size - 1) == 0 && (generation.value[size] | kDirty | kNewlyDirty) == (condition.value[size] | kDirty | kNewlyDirty); } std::string_view StorageGeneration::DecodeString( const StorageGeneration& 
generation) { std::string_view s = generation.value; if (s.empty()) return {}; while (true) { bool start_of_tags = static_cast<bool>(s.back() & kBaseGeneration); s.remove_suffix(1); if (start_of_tags || s.empty()) break; } return s; } } TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION( tensorstore::StorageGeneration, tensorstore::serialization::ApplyMembersSerializer< tensorstore::StorageGeneration>()) TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION( tensorstore::TimestampedStorageGeneration, tensorstore::serialization::ApplyMembersSerializer< tensorstore::TimestampedStorageGeneration>())
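// A sketch of how the tag bits above compose (each step is confirmed by the
// unit test below): FromString appends a kBaseGeneration tag byte, Dirty sets
// the dirty bit on the final byte, and Clean strips dirty bits again.
#include <cassert>

inline void StorageGenerationFlagsSketch() {
  using ::tensorstore::StorageGeneration;
  auto gen = StorageGeneration::FromString("abc");  // "abc" + kBaseGeneration
  auto dirty = StorageGeneration::Dirty(gen);       // final byte gains kDirty
  assert(!StorageGeneration::IsDirty(gen));
  assert(StorageGeneration::IsDirty(dirty));
  assert(StorageGeneration::Clean(dirty) == gen);   // dirty bits removed
  assert(StorageGeneration::DecodeString(gen) == "abc");
}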
#include "tensorstore/kvstore/generation.h" #include <gtest/gtest.h> #include "tensorstore/serialization/serialization.h" #include "tensorstore/serialization/test_util.h" namespace { using ::tensorstore::StorageGeneration; using ::tensorstore::TimestampedStorageGeneration; using ::tensorstore::serialization::TestSerializationRoundTrip; TEST(StorageGenerationTest, Basic) { EXPECT_TRUE(StorageGeneration::IsUnknown(StorageGeneration::Unknown())); EXPECT_FALSE(StorageGeneration::IsUnknown(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsNoValue(StorageGeneration::Unknown())); EXPECT_TRUE(StorageGeneration::IsNoValue(StorageGeneration::NoValue())); EXPECT_EQ(StorageGeneration{std::string{StorageGeneration::kDirty}}, StorageGeneration::Dirty(StorageGeneration::Unknown())); StorageGeneration gen{ std::string{1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration}}; StorageGeneration local_gen{std::string{ 1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration | StorageGeneration::kDirty}}; EXPECT_FALSE(StorageGeneration::IsUnknown(gen)); EXPECT_FALSE(StorageGeneration::IsUnknown(local_gen)); EXPECT_TRUE(StorageGeneration::IsClean(gen)); EXPECT_FALSE(StorageGeneration::IsClean(local_gen)); EXPECT_FALSE(StorageGeneration::IsDirty(gen)); EXPECT_TRUE(StorageGeneration::IsDirty(local_gen)); EXPECT_EQ(local_gen, StorageGeneration::Dirty(gen)); EXPECT_EQ(gen, StorageGeneration::Clean(local_gen)); EXPECT_TRUE(StorageGeneration::IsClean(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsClean(StorageGeneration::Unknown())); EXPECT_EQ(StorageGeneration::NoValue(), StorageGeneration::Clean(StorageGeneration::NoValue())); } TEST(StorageGenerationTest, Uint64) { auto g = StorageGeneration::FromUint64(12345); EXPECT_TRUE(StorageGeneration::IsUint64(g)); EXPECT_EQ(12345, StorageGeneration::ToUint64(g)); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Unknown())); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Invalid())); } TEST(StorageGenerationSerializationTest, Basic) { TestSerializationRoundTrip(StorageGeneration::Unknown()); TestSerializationRoundTrip(StorageGeneration::FromUint64(12345)); } TEST(TimestampedStorageGenerationSerializationTest, Basic) { TestSerializationRoundTrip(TimestampedStorageGeneration( StorageGeneration::FromUint64(12345), absl::InfinitePast())); TestSerializationRoundTrip(TimestampedStorageGeneration( StorageGeneration::FromUint64(12345), absl::InfiniteFuture())); } TEST(StorageGenerationTest, IsCleanValidValue) { EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::Unknown())); EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::NoValue())); EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::Invalid())); EXPECT_TRUE(StorageGeneration::IsCleanValidValue( StorageGeneration::FromString("abc"))); EXPECT_TRUE( StorageGeneration::IsCleanValidValue(StorageGeneration::FromUint64(42))); } TEST(StorageGenerationTest, DecodeString) { EXPECT_EQ("abc", StorageGeneration::DecodeString( StorageGeneration::FromString("abc"))); } }
StorageGeneration StorageGeneration::FromUint64(uint64_t n) {
  StorageGeneration generation;
  generation.value.resize(9);
  std::memcpy(generation.value.data(), &n, 8);
  generation.value[8] = kBaseGeneration;
  return generation;
}
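// Layout sketch for FromUint64 above: nine bytes total -- the integer in
// native byte order followed by one kBaseGeneration tag byte. The round trip
// asserted here is the same property the unit test below checks.
#include <cassert>
#include <cstdint>

inline void FromUint64LayoutSketch() {
  using ::tensorstore::StorageGeneration;
  StorageGeneration g = StorageGeneration::FromUint64(12345);
  assert(g.value.size() == 9);
  assert(g.value[8] == StorageGeneration::kBaseGeneration);
  assert(StorageGeneration::IsUint64(g));
  assert(StorageGeneration::ToUint64(g) == 12345);
}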
TEST(StorageGenerationTest, Uint64) {
  auto g = StorageGeneration::FromUint64(12345);
  EXPECT_TRUE(StorageGeneration::IsUint64(g));
  EXPECT_EQ(12345, StorageGeneration::ToUint64(g));
  EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Unknown()));
  EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::NoValue()));
  EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Invalid()));
}

TEST(StorageGenerationSerializationTest, Basic) {
  TestSerializationRoundTrip(StorageGeneration::Unknown());
  TestSerializationRoundTrip(StorageGeneration::FromUint64(12345));
}

TEST(TimestampedStorageGenerationSerializationTest, Basic) {
  TestSerializationRoundTrip(TimestampedStorageGeneration(
      StorageGeneration::FromUint64(12345), absl::InfinitePast()));
  TestSerializationRoundTrip(TimestampedStorageGeneration(
      StorageGeneration::FromUint64(12345), absl::InfiniteFuture()));
}